// ostd/specs/mm/vm_space.rs

1use core::ops::Range;
2use vstd::pervasive::proof_from_false;
3use vstd::prelude::*;
4
5use vstd_extra::ownership::*;
6
7use crate::mm::frame::untyped::UFrame;
8use crate::mm::io::{VmIoMemView, VmIoOwner, VmReader, VmWriter};
9use crate::mm::page_prop::PageProperty;
10use crate::mm::page_table::*;
11use crate::mm::vm_space::{Cursor, CursorMut, MappedItem, UserPtConfig, VmSpace};
12use crate::mm::{Paddr, PagingConstsTrait, PagingLevel, Vaddr, MAX_USERSPACE_VADDR};
13use crate::specs::arch::mm::{current_page_table_paddr_spec, NR_LEVELS};
14use crate::specs::mm::frame::meta_region_owners::MetaRegionOwners;
15use crate::specs::mm::page_table::cursor::owners::CursorOwner;
16use crate::specs::mm::page_table::cursor::CursorView;
17use crate::specs::mm::page_table::node::entry_owners::EntryOwner;
18use crate::specs::mm::page_table::{Guards, Mapping, OwnerSubtree, PageTableOwner, PageTableView};
19use crate::specs::mm::tlb::TlbModel;
20use crate::specs::mm::virt_mem_newer::{FrameContents, MemView};
21use crate::specs::task::InAtomicMode;
22
23verus! {
24
/// This struct is used for reading/writing memories represented by the
/// [`VmReader`] or [`VmWriter`]. We also require that a valid `vmspace_owner`
/// be present in this struct to ensure that the reader/writer is
/// not created out of thin air.
pub tracked struct VmIoPermission<'a> {
    /// The permission (and, once activated, the memory view) of the individual reader/writer.
    pub vmio_owner: VmIoOwner<'a>,
    /// The owner of the VM space that the reader/writer operates in.
    pub vmspace_owner: VmSpaceOwner<'a>,
}
33
/// A tracked struct for reasoning about verification-only properties of a [`VmSpace`].
///
/// This struct serves as a bookkeeper for all _active_ readers/writers within a specific
/// virtual memory space. It maintains a holistic view of the memory range covered by the
/// VM space it is tracking using a [`Ghost<MemView>`]. It also maintains a [`Tracked<MemView>`]
/// for the current memories it is holding permissions for, which is a subset of the total
/// memory range.
///
/// The management of each reader/writer and their corresponding memory views and permissions
/// must talk to this struct's APIs for properly taking and returning permissions, which ensures
/// the consistency of the overall VM space so that, e.g., no memory-aliasing will occur during
/// the lifetime of the readers/writers, and that no reader/writer will be created out of thin air
/// without the permission from the VM space owner (or the verification rejects invalid requests).
///
/// # Lifecycle of a reader/writer
///
/// We briefly introduce how we manage the lifecycle of a reader/writer under the management of
/// [`VmSpaceOwner`] in this section. In the first place we require that whenever the reader or
/// writer is being used to read or write memory, a matching permission called [`VmIoOwner`] must
/// be present. Thus the key is to properly manage the creation and deletion of the [`VmIoOwner`].
///
/// 1. **Creation**: To create a new reader/writer, we first check if the new reader/writer can be created
///   under the current VM space owner using the APIs [`Self::can_create_reader`] and [`Self::can_create_writer`].
///   Interestingly, this creates an empty reader/writer that doesn't hold any memory view yet.
/// 2. **Activation**: This is the magic step where we assign a memory view to the reader/writer. Via
///  [`Self::activate_reader`] and [`Self::activate_writer`],
///   the permissions will be moved from the VM space owner to the reader/writer. Note that readers do not
///   need owned permissions so they just borrow the memory view from the VM space owner, while writers need to
///   take the ownership of the memory view from the VM space owner.
///   After this step, the reader/writer is fully activated and can be used to read/write memory.
/// 3. **Disposal**: After the reader/writer finishes the reading/writing operation, we can dispose it via
///   [`Self::dispose_reader`] and [`Self::dispose_writer`]. This step is the reverse of activation, where the
///   permissions will be moved back from the reader/writer to the VM space owner. After this step, the reader/writer
///   is considered disposed but re-usable as long as it is properly activated again before use.
/// 4. **Removal**: If we know for sure that the reader/writer will never be used again, we can remove it from the
///    active list via [`Self::remove_reader`] and [`Self::remove_writer`].
pub tracked struct VmSpaceOwner<'a> {
    /// The owner of the page table of this VM space.
    pub page_table_owner: OwnerSubtree<UserPtConfig>,
    /// Whether this VM space is currently active.
    pub active: bool,
    /// Active readers for this VM space.
    pub readers: Seq<VmIoOwner<'a>>,
    /// Active writers for this VM space.
    pub writers: Seq<VmIoOwner<'a>>,
    /// The "actual" (remaining) memory view of this VM space, where some
    /// of the mappings may be transferred to the writers.
    pub mem_view: Option<MemView>,
    /// This is the holistic view of the memory range covered by this VM space owner.
    pub mv_range: Ghost<Option<MemView>>,
    /// Whether we allow shared reading. If `false`, active readers must be
    /// mutually disjoint (enforced by this type's `Inv::inv`).
    pub shared_reader: bool,
}
87
impl<'a> Inv for VmSpaceOwner<'a> {
    /// Defines the invariant for `VmSpaceOwner`.
    ///
    /// This specification ensures the consistency of the VM space, particularly
    /// regarding memory access permissions and overlapping ranges.
    ///
    /// # Invariants
    /// 1. **Recursion**: The underlying `page_table_owner` must satisfy its own invariant.
    /// 2. **Active State Consistency**: If the VM space is marked as `active`:
    ///    - **View Well-Formedness**: [`VmSpaceOwner::mem_view_wf`] must hold.
    ///    - Additionally, when `mem_view` is present:
    ///      - **Element Validity**: All stored `VmIoOwner` instances (readers and writers)
    ///        must satisfy their own invariants.
    ///      - **Memory Isolation (Read-Write)**: No reader memory range may overlap with any writer memory range.
    ///      - **Memory Isolation (Write-Write)**: No writer memory range may overlap with any other writer memory range.
    ///      - **Conditional Read Isolation**: If `shared_reader` is *not* set, readers must be
    ///        mutually disjoint (i.e., overlapping readers are only permitted under shared reading).
    open spec fn inv(self) -> bool {
        &&& self.page_table_owner.inv()
        &&& self.active ==> {
            &&& self.mem_view_wf()
            &&& self.mem_view matches Some(mem_view) ==> {
                // Readers and writers are valid.
                &&& forall|i: int|
                    #![trigger self.readers[i]]
                    0 <= i < self.readers.len() as int ==> {
                        &&& self.readers[i].inv()
                    }
                &&& forall|i: int|
                    #![trigger self.writers[i]]
                    0 <= i < self.writers.len() as int ==> {
                        &&& self.writers[i].inv()
                    }
                    // --- Memory Range Overlap Checks ---
                    // Readers do not overlap with any writer (read-write isolation).
                &&& forall|i, j: int|
                    #![trigger self.readers[i], self.writers[j]]
                    0 <= i < self.readers.len() as int && 0 <= j < self.writers.len() as int ==> {
                        let r = self.readers[i];
                        let w = self.writers[j];
                        r.disjoint(w)
                    }
                // Unless shared reading is enabled, distinct readers are pairwise disjoint.
                &&& !self.shared_reader ==> forall|i, j: int|
                    #![trigger self.readers[i], self.readers[j]]
                    0 <= i < self.readers.len() as int && 0 <= j < self.readers.len() as int && i
                        != j ==> {
                        let r1 = self.readers[i];
                        let r2 = self.readers[j];
                        r1.disjoint(r2)
                    }
                // Distinct writers are always pairwise disjoint (write-write isolation).
                &&& forall|i, j: int|
                    #![trigger self.writers[i], self.writers[j]]
                    0 <= i < self.writers.len() as int && 0 <= j < self.writers.len() as int && i
                        != j ==> {
                        let w1 = self.writers[i];
                        let w2 = self.writers[j];
                        w1.disjoint(w2)
                    }
            }
        }
    }
}
149
150impl<'a> VmSpaceOwner<'a> {
    /// This specification function ensures that the `mem_view` (remaining view),
    /// `mv_range` (total view), and the views held by active readers and writers
    /// maintain a consistent global state.
    ///
    /// The key properties include:
    ///
    /// ### 1. Existence Invariants
    /// * `mem_view` is present if and only if `mv_range` is present.
    ///
    /// ### 2. Structural Integrity
    /// * Both the remaining view and the total view must have finite memory mappings.
    /// * Internal mappings within each view must be disjoint (no overlapping address ranges).
    ///
    /// ### 3. Global Consistency
    /// * **Subset Relation**: The remaining view's mappings and memory domain must be subsets of the total view.
    /// * **Translation Equality**: For any virtual address (VA), the address translation in the remaining view must match the translation in the total view.
    ///
    /// ### 4. Writer Invariants (Exclusive Ownership)
    /// * **Type Verification**: Every writer must hold a `WriteView`.
    /// * **Translation Consistency**: Writer translations must match the total view.
    /// * **Mutual Exclusion**: If a writer translates a VA, that VA must not exist in the remaining view's translation table or memory domain.
    /// * **Mapping Isolation**: Writer mappings must be disjoint from the remaining view's mappings and must be a subset of the total view's mappings.
    ///
    /// ### 5. Reader Invariants (Shared Consistency)
    /// * **Type Verification**: Every reader must hold a `ReadView`.
    /// * **Translation Consistency**: Reader translations must be consistent with the total view.
    pub open spec fn mem_view_wf(self) -> bool {
        &&& self.mem_view is Some
            <==> self.mv_range@ is Some
        // This requires that TotalMapping (mvv) = mv ∪ writer mappings ∪ reader mappings
        &&& self.mem_view matches Some(remaining_view) ==> self.mv_range@ matches Some(total_view)
            ==> {
            &&& remaining_view.mappings_are_disjoint()
            &&& remaining_view.mappings.finite()
            &&& total_view.mappings_are_disjoint()
            &&& total_view.mappings.finite()
            // ======================
            // Remaining Consistency
            // ======================
            &&& remaining_view.mappings.subset_of(total_view.mappings)
            &&& remaining_view.memory.dom().subset_of(
                total_view.memory.dom(),
            )
            // =====================
            // Total View Consistency
            // =====================
            &&& forall|va: usize|
                #![trigger remaining_view.addr_transl(va)]
                #![trigger total_view.addr_transl(va)]
                remaining_view.addr_transl(va) == total_view.addr_transl(
                    va,
                )
            // =====================
            // Writer correctness
            // =====================
            // NOTE: `matches ... && { ... }` below is a conjunction, so every active
            // writer is required to actually hold a `WriteView` (not merely implied to).
            &&& forall|i: int|
                #![trigger self.writers[i]]
                0 <= i < self.writers.len() as int ==> {
                    let writer = self.writers[i];

                    &&& writer.mem_view matches Some(VmIoMemView::WriteView(writer_mv)) && {
                        &&& forall|va: usize|
                            #![trigger writer_mv.addr_transl(va)]
                            #![trigger total_view.addr_transl(va)]
                            #![trigger remaining_view.addr_transl(va)]
                            #![trigger remaining_view.memory.contains_key(va)]
                            {
                                // We do not enforce that the range must be the same as the
                                // memory view it holds as the writer may not consume all the
                                // memory in its range.
                                //
                                // So we cannot directly reason on `self.range` here; we need
                                // to instead ensure that the memory view it holds is consistent
                                // with the total view and remaining view.
                                &&& writer_mv.mappings.finite()
                                &&& writer_mv.addr_transl(va) == total_view.addr_transl(va)
                                &&& writer_mv.addr_transl(va) matches Some(_) ==> {
                                    &&& remaining_view.addr_transl(va) is None
                                    &&& !remaining_view.memory.contains_key(va)
                                }
                            }
                        &&& writer_mv.mappings.disjoint(remaining_view.mappings)
                        &&& writer_mv.mappings.subset_of(total_view.mappings)
                        &&& writer_mv.memory.dom().subset_of(total_view.memory.dom())
                    }
                }
                // =====================
                // Reader correctness
                // =====================
                // Similarly, every active reader is required to hold a `ReadView`.
            &&& forall|i: int|
                #![trigger self.readers[i]]
                0 <= i < self.readers.len() as int ==> {
                    let reader = self.readers[i];

                    &&& reader.mem_view matches Some(VmIoMemView::ReadView(reader_mv)) && {
                        forall|va: usize|
                            #![trigger reader_mv.addr_transl(va)]
                            #![trigger total_view.addr_transl(va)]
                            {
                                // For readers there is no need to check remaining_view
                                // because it is borrowed from remaining_view directly.
                                &&& reader_mv.mappings.finite()
                                &&& reader_mv.addr_transl(va) == total_view.addr_transl(va)
                            }
                    }
                }
        }
    }
259
    /// Determines whether a new reader can be safely instantiated within the VM address space.
    ///
    /// This specification function enforces memory isolation by ensuring that the
    /// requested memory range does not intersect with the domain of any active writer.
    ///
    /// Note that existing *readers* are not checked here: overlapping readers are
    /// permitted at creation time (reader-reader disjointness is governed by the
    /// `shared_reader` flag in the invariant instead).
    pub open spec fn can_create_reader(&self, vaddr: Vaddr, len: usize) -> bool
        recommends
            self.inv(),
    {
        &&& forall|i: int|
            #![trigger self.writers[i]]
            0 <= i < self.writers.len() ==> !self.writers[i].overlaps_with_range(
                vaddr..(vaddr + len) as usize,
            )
    }
274
    /// Checks if we can create a new writer under this VM space owner.
    ///
    /// Similar to [`Self::can_create_reader`], but checks both active readers and writers,
    /// since a writer's range must be exclusive of every other reader and writer.
    pub open spec fn can_create_writer(&self, vaddr: Vaddr, len: usize) -> bool
        recommends
            self.inv(),
    {
        &&& forall|i: int|
            #![trigger self.readers[i]]
            0 <= i < self.readers.len() ==> !self.readers[i].overlaps_with_range(
                vaddr..(vaddr + len) as usize,
            )
        &&& forall|i: int|
            #![trigger self.writers[i]]
            0 <= i < self.writers.len() ==> !self.writers[i].overlaps_with_range(
                vaddr..(vaddr + len) as usize,
            )
    }
293
    /// Generates a new unique ID for VM IO owners.
    ///
    /// This assumes that we always generate a fresh ID that is not used by any existing
    /// readers or writers. This should be safe as the ID space is unbounded and only used
    /// to reason about different VM IO owners in verification.
    ///
    /// Declared as an `uninterp` spec function: it has no body, and the freshness
    /// property is assumed at use sites rather than proven here.
    pub uninterp spec fn new_vm_io_id(&self) -> nat
        recommends
            self.inv(),
    ;
303
    /// Activates the given reader to read data from the user space of the current task.
    /// # Verified Properties
    /// ## Preconditions
    /// - The [`VmSpace`] invariants must hold with respect to the [`VmSpaceOwner`], which must be active.
    /// - The reader must be well-formed with respect to the [`VmSpaceOwner`], and its owner
    ///   must not already hold a memory view.
    /// - The reader's virtual address range must be mapped within the [`VmSpaceOwner`]'s memory view.
    /// ## Postconditions
    /// - The reader's owner (`owner_r`) is activated with a *borrowed* view of its virtual
    ///   address range taken from the [`VmSpaceOwner`]'s memory view.
    /// - NOTE(review): this function does not itself push the owner onto the active
    ///   `readers` list; that bookkeeping is performed by [`Self::dispose_reader`].
    /// ## Safety
    /// - The function preserves all memory invariants.
    /// - The [`MemView`] invariants ensure that the reader has a consistent view of memory.
    /// - The [`VmSpaceOwner`] invariants ensure that the viewed memory is owned exclusively by this [`VmSpace`].
    #[inline(always)]
    #[verus_spec(r =>
        requires
            old(self).mem_view matches Some(mv) &&
                forall |va: usize|
                #![auto]
                    old(owner_r).range.start <= va < old(owner_r).range.end ==>
                        mv.addr_transl(va) is Some
            ,
            old(self).inv(),
            old(self).active,
            reader.wf(*old(owner_r)),
            old(owner_r).mem_view is None,
            reader.inv(),
        ensures
            reader.wf(*final(owner_r)),
            final(owner_r).mem_view == Some(VmIoMemView::ReadView(&old(self).mem_view@.unwrap().borrow_at_spec(
                old(owner_r).range.start,
                (old(owner_r).range.end - old(owner_r).range.start) as usize,
            ))),
    )]
    pub proof fn activate_reader(tracked &'a mut self, reader: &'a VmReader<'a>, tracked owner_r: &'a mut VmIoOwner<'a>) {
            // The precondition guarantees `mem_view` is `Some`; the wildcard arm is
            // therefore unreachable and discharged via `proof_from_false`.
            let tracked mv = match self.mem_view {
                Some(ref mv) => mv,
                _ => { proof_from_false() },
            };
            // Readers only *borrow* the sub-view covering their range; the VM space
            // owner keeps ownership of the underlying memory view.
            let tracked borrowed_mv = mv.borrow_at(
                owner_r.range.start,
                (owner_r.range.end - owner_r.range.start) as usize,
            );

            owner_r.mem_view = Some(VmIoMemView::ReadView(borrowed_mv));

            // Prove that every address in the owner's range still translates in the
            // borrowed view: any mapping covering `va` in the full view survives the
            // range filter applied by `borrow_at`, so the filtered set is non-empty.
            assert forall|va: usize|
                #![auto]
                owner_r.range.start <= va < owner_r.range.end implies borrowed_mv.addr_transl(
                va,
            ) is Some by {
                if owner_r.range.start <= va && va < owner_r.range.end {
                    assert(borrowed_mv.mappings =~= mv.mappings.filter(
                        |m: Mapping|
                            m.va_range.start < (owner_r.range.end) && m.va_range.end
                                > owner_r.range.start,
                    ));
                    let o_borrow_mv = borrowed_mv.mappings.filter(
                        |m: Mapping| m.va_range.start <= va < m.va_range.end,
                    );
                    let o_mv = mv.mappings.filter(
                        |m: Mapping| m.va_range.start <= va < m.va_range.end,
                    );
                    assert(mv.addr_transl(va) is Some);
                    assert(o_mv.len() > 0);
                    let m = o_mv.choose();
                    vstd::set::axiom_set_choose_len(o_mv);
                    assert(o_mv.contains(m));
                    assert(o_borrow_mv.contains(m));
                    assert(o_borrow_mv.len() > 0);
                }
            }

    }
378
    /// Activates the given writer to write data to the user space of the current task.
    /// # Verified Properties
    /// ## Preconditions
    /// - The [`VmSpace`] invariants must hold with respect to the [`VmSpaceOwner`], which must be active.
    /// - The writer must be well-formed with respect to the [`VmSpaceOwner`], and its owner
    ///   must not already hold a memory view.
    /// - The writer's virtual address range must be mapped within the [`VmSpaceOwner`]'s memory view.
    /// ## Postconditions
    /// - The writer's owner (`owner_w`) is activated with an *owned* view of its virtual
    ///   address range split off from the [`VmSpaceOwner`]'s memory view; the owner keeps
    ///   the remainder.
    /// - NOTE(review): this function does not itself push the owner onto the active
    ///   `writers` list; that bookkeeping is performed by [`Self::dispose_writer`].
    /// ## Safety
    /// - The function preserves all memory invariants.
    /// - The [`MemView`] invariants ensure that the writer has a consistent view of memory.
    /// - The [`VmSpaceOwner`] invariants ensure that the viewed memory is owned exclusively by
    ///   this [`VmSpace`].
    #[inline(always)]
    #[verus_spec(r =>
        requires
            old(self).mem_view matches Some(mv) &&
                forall |va: usize|
                #![auto]
                    old(owner_w).range.start <= va < old(owner_w).range.end ==>
                        mv.addr_transl(va) is Some
            ,
            old(self).inv(),
            old(self).active,
            writer.wf(*old(owner_w)),
            old(owner_w).mem_view is None,
            writer.inv(),
        ensures
            writer.wf(*final(owner_w)),
            final(owner_w).mem_view == Some(VmIoMemView::WriteView(old(self).mem_view@.unwrap().split_spec(
                old(owner_w).range.start,
                (old(owner_w).range.end - old(owner_w).range.start) as usize,
            ).0)),
    )]
    pub proof fn activate_writer(tracked &mut self, writer: &'a VmWriter<'a>, tracked owner_w: &'a mut VmIoOwner<'a>) {
            // Writers take *ownership*: split the owner's view into the writer's
            // portion (`lhs`) and the remainder (`rhs`), which is put back.
            let tracked mut mv = self.mem_view.tracked_take();
            let ghost old_mv = mv;
            let tracked (lhs, rhs) = mv.split(
                owner_w.range.start,
                (owner_w.range.end - owner_w.range.start) as usize,
            );

            owner_w.mem_view = Some(VmIoMemView::WriteView(lhs));
            self.mem_view = Some(rhs);

            // Prove that every address in the owner's range still translates in the
            // split-off view: any mapping covering `va` in the original view survives
            // the range filter applied by `split`, so the filtered set is non-empty.
            assert forall|va: usize|
                #![auto]
                owner_w.range.start <= va < owner_w.range.end implies lhs.addr_transl(
                va,
            ) is Some by {
                if owner_w.range.start <= va && va < owner_w.range.end {
                    assert(lhs.mappings =~= old_mv.mappings.filter(
                        |m: Mapping|
                            m.va_range.start < (owner_w.range.end) && m.va_range.end
                                > owner_w.range.start,
                    ));
                    let o_lhs = lhs.mappings.filter(
                        |m: Mapping| m.va_range.start <= va < m.va_range.end,
                    );
                    let o_mv = old_mv.mappings.filter(
                        |m: Mapping| m.va_range.start <= va < m.va_range.end,
                    );

                    assert(old_mv.addr_transl(va) is Some);
                    assert(o_mv.len() > 0);
                    broadcast use vstd::set::axiom_set_choose_len;
                    let m = o_mv.choose();
                    assert(o_mv.contains(m));
                    assert(m.va_range.start <= va < m.va_range.end);
                    assert(o_lhs.contains(m));
                    assert(o_lhs.len() > 0);
                }
            }

    }
455
    /// Removes the given reader from the active readers list.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The [`VmSpace`] invariants must hold with respect to the [`VmSpaceOwner`], which must be active.
    /// - The index `idx` must be a valid index into the active readers list.
    /// ## Postconditions
    /// - The reader at index `idx` will be removed from the active readers list.
    /// - The invariants of the [`VmSpaceOwner`] will still hold after the removal.
    ///
    /// Note that, unlike [`Self::remove_writer`], nothing has to be merged back into
    /// `mem_view`: readers only ever borrow their view.
    pub proof fn remove_reader(tracked &mut self, idx: int)
        requires
            old(self).inv(),
            old(self).active,
            old(self).mem_view is Some,
            0 <= idx < old(self).readers.len() as int,
        ensures
            final(self).inv(),
            final(self).active == old(self).active,
            final(self).shared_reader == old(self).shared_reader,
            final(self).readers == old(self).readers.remove(idx),
    {
        self.readers.tracked_remove(idx);
    }
479
480
    /// Removes the given writer from the active writers list.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The [`VmSpace`] invariants must hold with respect to the [`VmSpaceOwner`], which must be active.
    /// - The index `idx` must be a valid index into the active writers list.
    /// ## Postconditions
    /// - The writer at index `idx` will be removed from the active writers list.
    /// - The memory view held by the removed writer will be returned to the VM space owner,
    ///   ensuring that the overall memory view remains consistent.
    /// - The invariants of the [`VmSpaceOwner`] will still hold after the removal.
    pub proof fn remove_writer(tracked &mut self, idx: usize)
        requires
            old(self).inv(),
            old(self).active,
            old(self).mem_view is Some,
            old(self).mv_range@ is Some,
            0 <= idx < old(self).writers.len() as int,
        ensures
            final(self).inv(),
            final(self).active == old(self).active,
            final(self).shared_reader == old(self).shared_reader,
            final(self).writers == old(self).writers.remove(idx as int),
    {
        let tracked writer = self.writers.tracked_remove(idx as int);

        // The invariant guarantees every active writer holds a `WriteView`, so the
        // wildcard arm is unreachable and discharged via `proof_from_false`.
        let tracked mv = match writer.mem_view {
            Some(VmIoMemView::WriteView(mv)) => mv,
            _ => { proof_from_false() },
        };

        // Merge the writer's owned view back into the remaining view.
        let tracked mut remaining = self.mem_view.tracked_take();
        let ghost old_remaining = remaining;
        remaining.join(mv);
        self.mem_view = Some(remaining);

        // Re-establish `mem_view_wf` for the merged view.
        assert(self.mem_view_wf()) by {
            let ghost total_view = self.mv_range@.unwrap();

            // `join` unions mappings and memory; `mv_range` is untouched.
            assert(remaining.mappings =~= old_remaining.mappings.union(mv.mappings));
            assert(remaining.memory =~= old_remaining.memory.union_prefer_right(mv.memory));
            assert(self.mv_range == old(self).mv_range);
            assert(self.mem_view == Some(remaining));

            // Translation equality with the total view: any mapping covering `va`
            // in the merged view already existed in the total view (subset), so the
            // filtered sets agree on emptiness.
            assert forall|va: usize|
                #![auto]
                { remaining.addr_transl(va) == total_view.addr_transl(va) } by {
                let r_mappings = remaining.mappings.filter(
                    |m: Mapping| m.va_range.start <= va < m.va_range.end,
                );
                let t_mappings = total_view.mappings.filter(
                    |m: Mapping| m.va_range.start <= va < m.va_range.end,
                );
                let w_mappings = mv.mappings.filter(
                    |m: Mapping| m.va_range.start <= va < m.va_range.end,
                );

                assert(r_mappings.subset_of(t_mappings));
                assert(w_mappings.subset_of(t_mappings));

                if r_mappings.len() > 0 {
                    let r = r_mappings.choose();
                    vstd::set::axiom_set_choose_len(r_mappings);
                    assert(r_mappings.contains(r));
                    assert(t_mappings.contains(r));
                    assert(t_mappings.len() > 0);
                }
            }

            // Every *other* active writer stays disjoint from the merged remaining
            // view: its mappings were disjoint from both the old remaining view and
            // the removed writer's view (by the old write-write invariant).
            assert forall|i: int|
                #![trigger self.writers[i]]
                0 <= i < self.writers.len() as int implies {
                let other_writer = self.writers[i];

                &&& other_writer.mem_view matches Some(VmIoMemView::WriteView(writer_mv))
                    && writer_mv.mappings.disjoint(remaining.mappings)
            } by {
                let other_writer = self.writers[i];

                assert(old(self).inv());
                let writer_mv = match other_writer.mem_view {
                    Some(VmIoMemView::WriteView(mv)) => mv,
                    _ => { proof_from_false() },
                };

                assert(exists|i: int|
                    0 <= i < old(self).writers.len() as int ==> #[trigger] old(self).writers[i]
                        == other_writer);
                assert(exists|i: int|
                    0 <= i < old(self).writers.len() as int ==> #[trigger] old(self).writers[i]
                        == writer);
                assert(mv.mappings.disjoint(writer_mv.mappings));
            }
        }
    }
576
    /// Disposes the given reader, releasing its ownership on the memory range.
    ///
    /// This does not mean that the owner is discarded; it indicates that someone
    /// who finishes the reading operation can let us reclaim the permission.
    /// The deletion of the reader is done via another API [`VmSpaceOwner::remove_reader`].
    ///
    /// Typically this API is called in two scenarios:
    ///
    /// 1. The reader has been created and we immediately move the ownership into us.
    /// 2. The reader has finished the reading and need to return the ownership back.
    ///
    /// Note that an owner with an empty range (`start >= end`) is simply consumed
    /// and not recorded in the active readers list.
    pub proof fn dispose_reader(tracked &mut self, tracked owner: VmIoOwner<'a>)
        requires
            owner.inv(),
            old(self).inv(),
            old(self).active,
            old(self).mv_range@ matches Some(total_view) && owner.mem_view matches Some(
                VmIoMemView::ReadView(mv),
            ) && old(self).mem_view matches Some(remaining) && mv.mappings.finite() && {
                forall|va: usize|
                    #![auto]
                    {
                        &&& total_view.addr_transl(va) == mv.addr_transl(va)
                        &&& mv.mappings.finite()
                    }
            },
            forall|i: int|
                #![trigger old(self).writers[i]]
                0 <= i < old(self).writers.len() ==> old(self).writers[i].disjoint(owner),
            forall|i: int|
                #![trigger old(self).readers[i]]
                0 <= i < old(self).readers.len() ==> old(self).readers[i].disjoint(owner),
        ensures
            final(self).inv(),
            final(self).active == old(self).active,
            final(self).shared_reader == old(self).shared_reader,
            owner.range@.start < owner.range@.end ==> final(self).readers == old(self).readers.push(owner),
    {
        if owner.range@.start < owner.range@.end {
            // Record this owner in the active readers list. Readers only borrow
            // their view, so there is no memory view to merge back into `mem_view`.
            self.readers.tracked_push(owner);
        }
    }
619
    /// Disposes the given writer, releasing its ownership on the memory range.
    ///
    /// This does not mean that the owner is discarded; it indicates that someone
    /// who finishes the writing operation can let us reclaim the permission.
    ///
    /// The deletion of the writer is through another API [`VmSpaceOwner::remove_writer`].
    pub proof fn dispose_writer(tracked &mut self, tracked owner: VmIoOwner<'a>)
        requires
            old(self).inv(),
            old(self).active,
            owner.inv(),
            // The disposed owner must carry a `WriteView` that (1) translates
            // addresses identically to the total view, (2) covers addresses the
            // remaining view does not (writers hold exclusive permission), and
            // (3) stays within the total view's mappings and memory domain.
            old(self).mv_range@ matches Some(total_view) && owner.mem_view matches Some(
                VmIoMemView::WriteView(mv),
            ) && old(self).mem_view matches Some(remaining) && mv.mappings.finite() && {
                &&& forall|va: usize|
                    #![auto]
                    {
                        &&& mv.addr_transl(va) == total_view.addr_transl(va)
                        &&& mv.addr_transl(va) matches Some(_) ==> {
                            &&& remaining.addr_transl(va) is None
                            &&& !remaining.memory.contains_key(va)
                        }
                    }
                &&& mv.mappings.disjoint(remaining.mappings)
                &&& mv.mappings.subset_of(total_view.mappings)
                &&& mv.memory.dom().subset_of(total_view.memory.dom())
            },
            // The returned writer must be disjoint from every still-active writer...
            forall|i: int|
                #![trigger old(self).writers[i]]
                0 <= i < old(self).writers.len() as int ==> old(self).writers[i].disjoint(owner),
            // ...and from every still-active reader.
            forall|i: int|
                #![trigger old(self).readers[i]]
                0 <= i < old(self).readers.len() as int ==> old(self).readers[i].disjoint(owner),
        ensures
            final(self).inv(),
            final(self).active == old(self).active,
            final(self).shared_reader == old(self).shared_reader,
            // Only a writer over a non-empty range is appended to `writers`.
            owner.range.start < owner.range.end ==> final(self).writers == old(self).writers.push(owner),
    {
        // If the writer has consumed all the memory, nothing to do;
        // just discard the writer and return the permission back to
        // the vm space owner.
        match owner.mem_view {
            Some(VmIoMemView::WriteView(ref writer_mv)) => {
                if owner.range.start < owner.range.end {
                    self.writers.tracked_push(owner);
                }
            },
            _ => {
                // Unreachable: the precondition pins `owner.mem_view` to
                // `Some(WriteView(_))`, so this arm is provably dead.
                assert(false);
            },
        }
    }
673}
674
impl<'a> VmSpace<'a> {
    /// Conditions under which creating a reader over `[vaddr, vaddr + len)` succeeds:
    /// a non-null, non-empty range that stays within user space, while this
    /// `VmSpace`'s page table is the one currently installed in the CPU.
    pub open spec fn reader_success_cond(self, vaddr: Vaddr, len: usize) -> bool {
        &&& vaddr != 0 && len > 0 && vaddr + len <= MAX_USERSPACE_VADDR
        &&& current_page_table_paddr_spec() == self.pt.root_paddr_spec()
    }

    /// Precondition for creating a writer.
    ///
    /// Currently this only requires the vm space owner's invariant to hold;
    /// `vaddr` and `len` are not yet constrained here (the range checks live
    /// in [`Self::writer_success_cond`]).
    pub open spec fn writer_requires(
        &self,
        vm_owner: VmSpaceOwner<'a>,
        vaddr: Vaddr,
        len: usize,
    ) -> bool {
        &&& vm_owner.inv()
    }

    /// Conditions under which creating a writer over `[vaddr, vaddr + len)` succeeds;
    /// same shape as [`Self::reader_success_cond`]: non-null, non-empty, user-space
    /// range and an activated page table rooted at this space's root.
    pub open spec fn writer_success_cond(self, vaddr: Vaddr, len: usize) -> bool {
        &&& vaddr != 0 && len > 0 && vaddr + len <= MAX_USERSPACE_VADDR
        &&& current_page_table_paddr_spec() == self.pt.root_paddr_spec()
    }
}
695
impl<'rcu, A: InAtomicMode> Cursor<'rcu, A> {
    /// Precondition for a successful query: the cursor's current virtual
    /// address lies strictly inside its barrier range.
    pub open spec fn query_success_requires(self) -> bool {
        self.0.barrier_va.start <= self.0.va < self.0.barrier_va.end
    }

    /// Postcondition of a query at the cursor's position.
    ///
    /// If the cursor view is present (NOTE(review): presumably meaning the
    /// current slot holds a mapped item — confirm against `CursorView::present`),
    /// the returned `item` is `Some` and `query_item_spec` relates it to the
    /// returned `range`; otherwise nothing is mapped here, the range starts at
    /// the cursor's virtual address, and `item` is `None`.
    pub open spec fn query_success_ensures(
        self,
        view: CursorView<UserPtConfig>,
        range: Range<Vaddr>,
        item: Option<MappedItem>,
    ) -> bool {
        if view.present() {
            &&& item is Some
            &&& view.query_item_spec(item.unwrap()) == Some(range)
        } else {
            &&& range.start == self.0.va
            &&& item is None
        }
    }
}
716
impl<'a, A: InAtomicMode> CursorMut<'a, A> {
    /// Precondition on the cursor state for performing a map operation:
    /// the owner must be within the locked range, the cursor must sit below
    /// the guard level, and its virtual address must be below the barrier end.
    pub open spec fn map_cursor_requires(
        self,
        cursor_owner: CursorOwner<'a, UserPtConfig>,
    ) -> bool {
        &&& cursor_owner.in_locked_range()
        &&& self.pt_cursor.inner.level < self.pt_cursor.inner.guard_level
        &&& self.pt_cursor.inner.va < self.pt_cursor.inner.barrier_va.end
    }

    /// Well-formedness of a `(frame, prop)` pair as an item to be mapped at the
    /// cursor's position, relative to the entry owner and metadata regions.
    ///
    /// Ties together: the raw `(paddr, level, prop)` produced by
    /// `item_into_raw_spec`, the entry owner's recorded physical address and
    /// property, level bounds (translatable, within `NR_LEVELS`, below the
    /// guard level), child well-formedness, alignment of the cursor's virtual
    /// address to the page size, room before the barrier, and the item's slot
    /// residing in `regions`.
    pub open spec fn item_wf(
        self,
        frame: UFrame,
        prop: PageProperty,
        entry_owner: EntryOwner<UserPtConfig>,
        regions: MetaRegionOwners,
    ) -> bool {
        let item = MappedItem { frame: frame, prop: prop };
        let (paddr, level, prop0) = UserPtConfig::item_into_raw_spec(item);
        // The raw conversion must not alter the page property.
        &&& prop == prop0
        // The entry owner's frame must record exactly this mapping.
        &&& entry_owner.frame.unwrap().mapped_pa == paddr
        &&& entry_owner.frame.unwrap().prop == prop
        &&& level <= UserPtConfig::HIGHEST_TRANSLATION_LEVEL()
        &&& 1 <= level <= NR_LEVELS
        &&& level < self.pt_cursor.inner.guard_level
        &&& Child::Frame(paddr, level, prop0).wf(entry_owner)
        // The mapped page must fit before the barrier...
        &&& self.pt_cursor.inner.va + page_size(level) <= self.pt_cursor.inner.barrier_va.end
        &&& entry_owner.inv()
        // ...and start at a page-size-aligned virtual address.
        &&& self.pt_cursor.inner.va % page_size(level) == 0
        &&& crate::mm::page_table::CursorMut::<'a, UserPtConfig, A>::item_slot_in_regions(item, regions)
    }

    /// Postcondition of mapping `(frame, prop)`: the new cursor view equals the
    /// old view with the raw `(paddr, page_size(level), prop)` mapping applied
    /// via `map_spec`.
    pub open spec fn map_item_ensures(
        self,
        frame: UFrame,
        prop: PageProperty,
        old_cursor_view: CursorView<UserPtConfig>,
        cursor_view: CursorView<UserPtConfig>,
    ) -> bool {
        let item = MappedItem { frame: frame, prop: prop };
        let (paddr, level, prop0) = UserPtConfig::item_into_raw_spec(item);
        cursor_view == old_cursor_view.map_spec(paddr, page_size(level), prop)
    }
}
761
762}