ostd/mm/vm_space.rs

// SPDX-License-Identifier: MPL-2.0
//! Virtual memory space management.
//!
//! The [`VmSpace`] struct is provided to manage the virtual memory space of a
//! user. Cursors are used to traverse and modify the virtual memory space
//! concurrently. The VM space cursor [`self::Cursor`] is just a wrapper over
//! the page table cursor, providing efficient, powerful concurrent access
//! to the page table.

9use alloc::vec::Vec;
10use vstd::pervasive::{arbitrary, proof_from_false};
11use vstd::prelude::*;
12
13use crate::specs::mm::virt_mem_newer::{MemView, VirtPtr};
14
15use crate::error::Error;
16use crate::mm::frame::untyped::UFrame;
17use crate::mm::io::VmIoMemView;
18use crate::mm::page_table::*;
19use crate::mm::page_table::{EntryOwner, PageTableFrag, PageTableGuard};
20use crate::specs::arch::*;
21use crate::specs::mm::frame::meta_region_owners::MetaRegionOwners;
22use crate::specs::mm::page_table::cursor::owners::CursorOwner;
23use crate::specs::mm::page_table::*;
24use crate::specs::task::InAtomicMode;
25use core::marker::PhantomData;
26use core::{ops::Range, sync::atomic::Ordering};
27use vstd_extra::ghost_tree::*;
28use crate::specs::mm::frame::mapping::meta_to_frame;
29
30use crate::{
31    //    cpu::{AtomicCpuSet, CpuSet, PinCurrentCpu},
32    //    cpu_local_cell,
33    mm::{
34        io::{VmIoOwner, VmReader, VmWriter},
35        //        io::Fallible,
36        // kspace::KERNEL_PAGE_TABLE,
37        // page_table,
38        //        tlb::{TlbFlushOp, TlbFlusher},
39        page_prop::PageProperty,
40        Paddr,
41        PagingConstsTrait,
42        PagingLevel,
43        Vaddr,
44        MAX_USERSPACE_VADDR,
45    },
46    prelude::*,
47    //    task::{atomic_mode::AsAtomicModeGuard, disable_preempt, DisabledPreemptGuard},
48};
49
50use alloc::sync::Arc;
51
52verus! {
53
54/// A virtual address space for user-mode tasks, enabling safe manipulation of user-space memory.
55///
56/// The `VmSpace` type provides memory isolation guarantees between user-space and
57/// kernel-space. For example, given an arbitrary user-space pointer, one can read and
58/// write the memory location referred to by the user-space pointer without the risk of
59/// breaking the memory safety of the kernel space.
60///
61/// # Task Association Semantics
62///
63/// As far as OSTD is concerned, a `VmSpace` is not necessarily associated with a task. Once a
64/// `VmSpace` is activated (see [`VmSpace::activate`]), it remains activated until another
65/// `VmSpace` is activated **possibly by another task running on the same CPU**.
66///
67/// This means that it's up to the kernel to ensure that a task's `VmSpace` is always activated
68/// while the task is running. This can be done by using the injected post schedule handler
69/// (see [`inject_post_schedule_handler`]) to always activate the correct `VmSpace` after each
70/// context switch.
71///
72/// If the kernel otherwise decides not to ensure that the running task's `VmSpace` is always
73/// activated, the kernel must deal with race conditions when calling methods that require the
74/// `VmSpace` to be activated, e.g., [`UserMode::execute`], [`VmSpace::reader`],
75/// [`VmSpace::writer`]. Otherwise, the behavior is unspecified, though it's guaranteed _not_ to
76/// compromise the kernel's memory safety.
77///
78/// # Memory Backing
79///
80/// A newly-created `VmSpace` is not backed by any physical memory pages. To
81/// provide memory pages for a `VmSpace`, one can allocate and map physical
82/// memory ([`UFrame`]s) to the `VmSpace` using the cursor.
83///
84/// A `VmSpace` can also attach a page fault handler, which will be invoked to
85/// handle page faults generated from user space.
86///
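/// # Example
///
/// A minimal usage sketch, assuming a hypothetical caller that already holds an
/// atomic-mode `guard`, an allocated `frame`, a page property `prop`, and a user
/// virtual address `va` (ghost and tracked arguments are elided; the snippet is
/// illustrative and not compiled):
///
/// ```rust,ignore
/// let vm_space = VmSpace::new();
/// // ... activate the space for the current task (see `VmSpace::activate`) ...
/// // Provide backing memory by mapping a frame through a mutable cursor.
/// let mut cursor = vm_space.cursor_mut(&guard, &(va..va + 0x1000))?;
/// cursor.map(frame, prop);
/// ```
///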
87/// [`inject_post_schedule_handler`]: crate::task::inject_post_schedule_handler
88/// [`UserMode::execute`]: crate::user::UserMode::execute
89#[rustc_has_incoherent_inherent_impls]
90pub struct VmSpace<'a> {
91    pub pt: PageTable<UserPtConfig>,
92    /// Whether we allow shared reading.
93    pub shared_reader: bool,
94    /// All readers belonging to this VM space.
95    pub readers: Vec<&'a VmReader<'a>>,
96    /// All writers belonging to this VM space.
97    pub writers: Vec<&'a VmWriter<'a>>,
98    //    cpus: AtomicCpuSet,
99    pub marker: PhantomData<&'a ()>,
100}
101
102impl Inv for VmSpace<'_> {
103    open spec fn inv(self) -> bool {
104        &&& forall|i: int|
105            #![trigger self.readers@[i]]
106            0 <= i < self.readers@.len() as int ==> self.readers@[i].inv()
107        &&& forall|i: int|
108            #![trigger self.writers@[i]]
109            0 <= i < self.writers@.len() as int ==> self.writers@[i].inv()
110    }
111}
112
/// This struct is used for reading/writing memory represented by the
/// [`VmReader`] or [`VmWriter`]. We also require that a valid `vmspace_owner`
/// be present in this struct to ensure that the reader/writer is not
/// created out of thin air.
117pub tracked struct VmIoPermission<'a> {
118    pub vmio_owner: VmIoOwner<'a>,
119    pub vmspace_owner: VmSpaceOwner<'a>,
120}
121
122/// A tracked struct for reasoning about verification-only properties of a [`VmSpace`].
123pub tracked struct VmSpaceOwner<'a> {
124    /// The owner of the page table of this VM space.
125    pub page_table_owner: OwnerSubtree<UserPtConfig>,
126    /// Whether this VM space is currently active.
127    pub active: bool,
128    /// Active readers for this VM space.
129    // pub readers: Map<nat, VmIoOwner<'a>>,
130    pub readers: Seq<VmIoOwner<'a>>,
131    /// Active writers for this VM space.
132    pub writers: Seq<VmIoOwner<'a>>,
    /// The "actual" memory view of this VM space, where some of the
    /// mappings may be transferred to the writers.
135    pub mem_view: Option<MemView>,
136    /// This is the holistic view of the memory range covered by this VM space owner.
137    pub mv_range: Ghost<Option<MemView>>,
138    /// Whether we allow shared reading.
139    pub shared_reader: bool,
140}
141
142impl<'a> Inv for VmSpaceOwner<'a> {
    /// Defines the invariant for `VmSpaceOwner`.
    ///
    /// This specification ensures the consistency of the VM space, particularly
    /// regarding memory access permissions and overlapping ranges.
    ///
    /// # Invariants
    /// 1. **Recursion**: The underlying `page_table_owner` must satisfy its own invariant.
    /// 2. **Memory View Well-Formedness**: If the VM space is marked as `active`, its memory
    ///    views must be well-formed (see [`VmSpaceOwner::mem_view_wf`]).
    /// 3. **Active State Consistency**: If the VM space is `active` and holds a memory view:
    ///    - **Element Validity**: All stored reader and writer `VmIoOwner` instances must be valid.
    ///    - **Memory Isolation (Read-Write)**: No reader memory range may overlap with any writer memory range.
    ///    - **Memory Isolation (Write-Write)**: No writer memory range may overlap with any other writer memory range.
    ///    - **Conditional Read Isolation**: If `shared_reader` is not set, readers must be mutually disjoint (cannot overlap).
158    open spec fn inv(self) -> bool {
159        &&& self.page_table_owner.inv()
160        &&& self.active ==> {
161            &&& self.mem_view_wf()
162            &&& self.mem_view matches Some(mem_view) ==> {
163                // Readers and writers are valid.
164                &&& forall|i: int|
165                    #![trigger self.readers[i]]
166                    0 <= i < self.readers.len() as int ==> {
167                        &&& self.readers[i].inv()
168                    }
169                &&& forall|i: int|
170                    #![trigger self.writers[i]]
171                    0 <= i < self.writers.len() as int ==> {
172                        &&& self.writers[i].inv()
173                    }
                    // --- Memory Range Overlap Checks ---
                    // Readers do not overlap with writers; readers do not overlap with each
                    // other (unless shared reading is allowed); writers do not overlap with
                    // each other.
176                &&& forall|i, j: int|
177                    #![trigger self.readers[i], self.writers[j]]
178                    0 <= i < self.readers.len() as int && 0 <= j < self.writers.len() as int ==> {
179                        let r = self.readers[i];
180                        let w = self.writers[j];
181                        r.disjoint(w)
182                    }
183                &&& !self.shared_reader ==> forall|i, j: int|
184                    #![trigger self.readers[i], self.readers[j]]
185                    0 <= i < self.readers.len() as int && 0 <= j < self.readers.len() as int && i
186                        != j ==> {
187                        let r1 = self.readers[i];
188                        let r2 = self.readers[j];
189                        r1.disjoint(r2)
190                    }
191                &&& forall|i, j: int|
192                    #![trigger self.writers[i], self.writers[j]]
193                    0 <= i < self.writers.len() as int && 0 <= j < self.writers.len() as int && i
194                        != j ==> {
195                        let w1 = self.writers[i];
196                        let w2 = self.writers[j];
197                        w1.disjoint(w2)
198                    }
199            }
200        }
201    }
202}
203
204impl<'a> VmSpaceOwner<'a> {
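    /// States that the remaining memory view (`mem_view`), together with the views
    /// currently lent out to readers and writers, is consistent with the total view
    /// (`mv_range`).
    ///
    /// Informally (a hypothetical illustration): if a writer currently holds the mapping
    /// for `0x1000..0x2000`, that mapping is no longer part of the remaining view, yet
    /// the writer still translates those addresses exactly as the total view does; the
    /// remaining mappings and memory always stay subsets of the total ones.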
205    pub open spec fn mem_view_wf(self) -> bool {
206        &&& self.mem_view is Some
207            <==> self.mv_range@ is Some
        // Intuitively: total view (`mv_range`) = remaining view (`mem_view`) ∪ writer views ∪ reader views
209        &&& self.mem_view matches Some(remaining_view) ==> self.mv_range@ matches Some(total_view)
210            ==> {
211            &&& remaining_view.mappings_are_disjoint()
212            &&& remaining_view.mappings.finite()
213            &&& total_view.mappings_are_disjoint()
214            &&& total_view.mappings.finite()
215            // ======================
216            // Remaining Consistency
217            // ======================
218            &&& remaining_view.mappings.subset_of(total_view.mappings)
219            &&& remaining_view.memory.dom().subset_of(
220                total_view.memory.dom(),
221            )
222            // =====================
223            // Total View Consistency
224            // =====================
225            &&& forall|va: usize|
226                #![trigger remaining_view.addr_transl(va)]
227                #![trigger total_view.addr_transl(va)]
228                remaining_view.addr_transl(va) == total_view.addr_transl(
229                    va,
230                )
231            // =====================
232            // Writer correctness
233            // =====================
234            &&& forall|i: int|
235                #![trigger self.writers[i]]
236                0 <= i < self.writers.len() as int ==> {
237                    let writer = self.writers[i];
238
239                    &&& writer.mem_view matches Some(VmIoMemView::WriteView(writer_mv)) && {
240                        &&& forall|va: usize|
241                            #![trigger writer_mv.addr_transl(va)]
242                            #![trigger total_view.addr_transl(va)]
243                            #![trigger remaining_view.addr_transl(va)]
244                            #![trigger remaining_view.memory.contains_key(va)]
245                            {
                                // We do not enforce that the writer's range equals the
                                // memory view it holds, as the writer may not consume all
                                // the memory in its range.
                                //
                                // So we cannot directly reason about `writer.range` here;
                                // we instead ensure that the memory view it holds is
                                // consistent with the total view and the remaining view.
253                                &&& writer_mv.mappings.finite()
254                                &&& writer_mv.addr_transl(va) == total_view.addr_transl(va)
255                                &&& writer_mv.addr_transl(va) matches Some(_) ==> {
256                                    &&& remaining_view.addr_transl(va) is None
257                                    &&& !remaining_view.memory.contains_key(va)
258                                }
259                            }
260                        &&& writer_mv.mappings.disjoint(remaining_view.mappings)
261                        &&& writer_mv.mappings.subset_of(total_view.mappings)
262                        &&& writer_mv.memory.dom().subset_of(total_view.memory.dom())
263                    }
264                }
265                // =====================
266                // Reader correctness
267                // =====================
268            &&& forall|i: int|
269                #![trigger self.readers[i]]
270                0 <= i < self.readers.len() as int ==> {
271                    let reader = self.readers[i];
272
273                    &&& reader.mem_view matches Some(VmIoMemView::ReadView(reader_mv)) && {
274                        forall|va: usize|
275                            #![trigger reader_mv.addr_transl(va)]
276                            #![trigger total_view.addr_transl(va)]
277                            {
278                                // For readers there is no need to check remaining_view
279                                // because it is borrowed from remaining_view directly.
280                                &&& reader_mv.mappings.finite()
281                                &&& reader_mv.addr_transl(va) == total_view.addr_transl(va)
282                            }
283                    }
284                }
285        }
286    }
287
288    /// The basic invariant between a VM space and its owner.
289    pub open spec fn inv_with(&self, vm_space: VmSpace<'a>) -> bool {
290        &&& self.shared_reader == vm_space.shared_reader
291        &&& self.readers.len() == vm_space.readers@.len()
292        &&& self.writers.len() == vm_space.writers@.len()
293        &&& forall|i: int|
294            #![trigger self.readers[i]]
295            #![trigger vm_space.readers@[i]]
296            0 <= i < vm_space.readers@.len() as int ==> {
297                &&& self.readers[i].inv_with_reader(*vm_space.readers@[i])
298            }
299        &&& forall|i: int|
300            #![trigger self.writers[i]]
301            #![trigger vm_space.writers@[i]]
302            0 <= i < vm_space.writers@.len() as int ==> {
303                &&& self.writers[i].inv_with_writer(*vm_space.writers@[i])
304            }
305    }
306
307    /// Checks if we can create a new reader under this VM space owner.
308    ///
    /// This requires that no active writer overlaps with the new reader's range.
310    pub open spec fn can_create_reader(&self, vaddr: Vaddr, len: usize) -> bool
311        recommends
312            self.inv(),
313    {
314        &&& forall|i: int|
315            #![trigger self.writers[i]]
316            0 <= i < self.writers.len() ==> !self.writers[i].overlaps_with_range(
317                vaddr..(vaddr + len) as usize,
318            )
319    }
320
321    /// Checks if we can create a new writer under this VM space owner.
322    ///
323    /// Similar to [`can_create_reader`], but checks both active readers and writers.
324    pub open spec fn can_create_writer(&self, vaddr: Vaddr, len: usize) -> bool
325        recommends
326            self.inv(),
327    {
328        &&& forall|i: int|
329            #![trigger self.readers[i]]
330            0 <= i < self.readers.len() ==> !self.readers[i].overlaps_with_range(
331                vaddr..(vaddr + len) as usize,
332            )
333        &&& forall|i: int|
334            #![trigger self.writers[i]]
335            0 <= i < self.writers.len() ==> !self.writers[i].overlaps_with_range(
336                vaddr..(vaddr + len) as usize,
337            )
338    }
339
    /// Generates a new unique ID for VM IO owners.
    ///
    /// This assumes that we always generate a fresh ID that is not used by any existing
    /// readers or writers. This should be safe as the ID space is unbounded and only used
    /// to reason about different VM IO owners in verification.
345    #[verifier::external_body]
346    #[verus_spec(r =>
347        requires
348            self.inv(),
349    )]
350    pub proof fn new_vm_io_id(&self) -> nat {
351        unimplemented!()
352    }
353
354    /// Removes the given reader from the active readers list.
355    pub proof fn remove_reader(tracked &mut self, idx: int)
356        requires
357            old(self).inv(),
358            old(self).active,
359            old(self).mem_view is Some,
360            0 <= idx < old(self).readers.len() as int,
361        ensures
362            self.inv(),
363            self.active == old(self).active,
364            self.shared_reader == old(self).shared_reader,
365            self.readers == old(self).readers.remove(idx),
366    {
367        self.readers.tracked_remove(idx);
368    }
369
    /// Disposes the given reader, releasing its ownership of the memory range.
    ///
    /// This does not mean that the owner is discarded; it indicates that whoever
    /// finishes the reading operation can let us reclaim the permission.
    /// The deletion of the reader is done via another API, [`VmSpaceOwner::remove_reader`].
    ///
    /// Typically this API is called in two scenarios:
    ///
    /// 1. The reader has just been created and we immediately move its ownership into us.
    /// 2. The reader has finished reading and needs to return the ownership back.
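    ///
    /// A proof-mode sketch of the second scenario (hypothetical `reader_owner` whose
    /// preconditions are assumed to hold; not compiled):
    ///
    /// ```rust,ignore
    /// proof {
    ///     // The finished reader returns its permission to the VM space owner.
    ///     vm_space_owner.dispose_reader(reader_owner);
    ///     // Its entry can later be dropped with `remove_reader(idx)`.
    /// }
    /// ```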
380    pub proof fn dispose_reader(tracked &mut self, tracked owner: VmIoOwner<'a>)
381        requires
382            owner.inv(),
383            old(self).inv(),
384            old(self).active,
385            old(self).mv_range@ matches Some(total_view) && owner.mem_view matches Some(
386                VmIoMemView::ReadView(mv),
387            ) && old(self).mem_view matches Some(remaining) && mv.mappings.finite() && {
388                forall|va: usize|
389                    #![auto]
390                    {
391                        &&& total_view.addr_transl(va) == mv.addr_transl(va)
392                        &&& mv.mappings.finite()
393                    }
394            },
395            forall|i: int|
396                #![trigger old(self).writers[i]]
397                0 <= i < old(self).writers.len() ==> old(self).writers[i].disjoint(owner),
398            forall|i: int|
399                #![trigger old(self).readers[i]]
400                0 <= i < old(self).readers.len() ==> old(self).readers[i].disjoint(owner),
401        ensures
402            self.inv(),
403            self.active == old(self).active,
404            self.shared_reader == old(self).shared_reader,
405            owner.range@.start < owner.range@.end ==> self.readers == old(self).readers.push(owner),
406    {
407        let tracked mv = match owner.mem_view {
408            Some(VmIoMemView::ReadView(mv)) => mv,
409            _ => { proof_from_false() },
410        };
411
412        if owner.range@.start < owner.range@.end {
413            // Return the memory view back to the vm space owner.
414            self.readers.tracked_push(owner);
415        }
416    }
417
    /// Disposes the given writer, releasing its ownership of the memory range.
    ///
    /// This does not mean that the owner is discarded; it indicates that whoever
    /// finishes the writing operation can let us reclaim the permission.
    ///
    /// The deletion of the writer is done via another API, [`VmSpaceOwner::remove_writer`].
424    pub proof fn dispose_writer(tracked &mut self, tracked owner: VmIoOwner<'a>)
425        requires
426            old(self).inv(),
427            old(self).active,
428            owner.inv(),
429            old(self).mv_range@ matches Some(total_view) && owner.mem_view matches Some(
430                VmIoMemView::WriteView(mv),
431            ) && old(self).mem_view matches Some(remaining) && mv.mappings.finite() && {
432                &&& forall|va: usize|
433                    #![auto]
434                    {
435                        &&& mv.addr_transl(va) == total_view.addr_transl(va)
436                        &&& mv.addr_transl(va) matches Some(_) ==> {
437                            &&& remaining.addr_transl(va) is None
438                            &&& !remaining.memory.contains_key(va)
439                        }
440                    }
441                &&& mv.mappings.disjoint(remaining.mappings)
442                &&& mv.mappings.subset_of(total_view.mappings)
443                &&& mv.memory.dom().subset_of(total_view.memory.dom())
444            },
445            forall|i: int|
446                #![trigger old(self).writers[i]]
447                0 <= i < old(self).writers.len() as int ==> old(self).writers[i].disjoint(owner),
448            forall|i: int|
449                #![trigger old(self).readers[i]]
450                0 <= i < old(self).readers.len() as int ==> old(self).readers[i].disjoint(owner),
451        ensures
452            self.inv(),
453            self.active == old(self).active,
454            self.shared_reader == old(self).shared_reader,
455            owner.range@.start < owner.range@.end ==> self.writers == old(self).writers.push(owner),
456    {
        // If the writer has consumed its entire range, there is nothing to keep;
        // just discard the writer. Otherwise, push it into the active writers so
        // that its permission can later be reclaimed by the VM space owner.
460        match owner.mem_view {
461            Some(VmIoMemView::WriteView(ref writer_mv)) => {
462                if owner.range@.start < owner.range@.end {
463                    self.writers.tracked_push(owner);
464                }
465            },
466            _ => {
467                assert(false);
468            },
469        }
470    }
471
472    pub proof fn remove_writer(tracked &mut self, idx: usize)
473        requires
474            old(self).inv(),
475            old(self).active,
476            old(self).mem_view is Some,
477            old(self).mv_range@ is Some,
478            0 <= idx < old(self).writers.len() as int,
479        ensures
480            self.inv(),
481            self.active == old(self).active,
482            self.shared_reader == old(self).shared_reader,
483            self.writers == old(self).writers.remove(idx as int),
484    {
485        let tracked writer = self.writers.tracked_remove(idx as int);
486
487        // Now we need to "return" the memory view back to the vm space owner.
488        let tracked mv = match writer.mem_view {
489            Some(VmIoMemView::WriteView(mv)) => mv,
490            _ => { proof_from_false() },
491        };
492
493        // "Join" the memory view back.
494        let tracked mut remaining = self.mem_view.tracked_take();
495        let ghost old_remaining = remaining;
496        remaining.join(mv);
497        self.mem_view = Some(remaining);
498
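        // Re-establish memory-view well-formedness: after joining the writer's view back,
        // every address still translates as in the total view, and the remaining mappings
        // stay disjoint from those of every other active writer.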
499        assert(self.mem_view_wf()) by {
500            let ghost total_view = self.mv_range@.unwrap();
501
502            assert(remaining.mappings =~= old_remaining.mappings.union(mv.mappings));
503            assert(remaining.memory =~= old_remaining.memory.union_prefer_right(mv.memory));
504            assert(self.mv_range == old(self).mv_range);
505            assert(self.mem_view == Some(remaining));
506
507            assert forall|va: usize|
508                #![auto]
509                { remaining.addr_transl(va) == total_view.addr_transl(va) } by {
510                let r_mappings = remaining.mappings.filter(
511                    |m: Mapping| m.va_range.start <= va < m.va_range.end,
512                );
513                let t_mappings = total_view.mappings.filter(
514                    |m: Mapping| m.va_range.start <= va < m.va_range.end,
515                );
516                let w_mappings = mv.mappings.filter(
517                    |m: Mapping| m.va_range.start <= va < m.va_range.end,
518                );
519
520                assert(r_mappings.subset_of(t_mappings));
521                assert(w_mappings.subset_of(t_mappings));
522
523                if r_mappings.len() > 0 {
524                    assert(t_mappings.len() > 0) by {
525                        let r = r_mappings.choose();
526                        assert(r_mappings.contains(r)) by {
527                            vstd::set::axiom_set_choose_len(r_mappings);
528                        }
529                        assert(t_mappings.contains(r));
530                    }
531                }
532            }
533
534            assert forall|i: int|
535                #![trigger self.writers[i]]
536                0 <= i < self.writers.len() as int implies {
537                let other_writer = self.writers[i];
538
539                &&& other_writer.mem_view matches Some(VmIoMemView::WriteView(writer_mv))
540                    && writer_mv.mappings.disjoint(remaining.mappings)
541            } by {
542                let other_writer = self.writers[i];
543
544                assert(old(self).inv());
545                let writer_mv = match other_writer.mem_view {
546                    Some(VmIoMemView::WriteView(mv)) => mv,
547                    _ => { proof_from_false() },
548                };
549
550                assert(mv.mappings.disjoint(writer_mv.mappings)) by {
551                    assert(exists|i: int|
552                        0 <= i < old(self).writers.len() as int ==> #[trigger] old(self).writers[i]
553                            == other_writer);
554                    assert(exists|i: int|
555                        0 <= i < old(self).writers.len() as int ==> #[trigger] old(self).writers[i]
556                            == writer);
557                }
558            }
559        }
560    }
561}
562
563/// The configuration for user page tables.
564#[derive(Clone, Debug)]
565pub struct UserPtConfig {}
566
567/// The item that can be mapped into the [`VmSpace`].
568#[derive(Clone)]
569pub struct MappedItem {
570    pub frame: UFrame,
571    pub prop: PageProperty,
572}
573
// SAFETY: `item_into_raw` and `item_from_raw` are implemented correctly.
575unsafe impl PageTableConfig for UserPtConfig {
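    // Note: with 512 entries in the top-level page table (as in typical 4-level paging),
    // indices 0..256 cover the lower, user-space half of the virtual address space.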
576    open spec fn TOP_LEVEL_INDEX_RANGE_spec() -> Range<usize> {
577        0..256
578    }
579
580    open spec fn TOP_LEVEL_CAN_UNMAP_spec() -> (b: bool) {
581        true
582    }
583
584    fn TOP_LEVEL_INDEX_RANGE() -> Range<usize> {
585        0..256
586    }
587
588    fn TOP_LEVEL_CAN_UNMAP() -> (b: bool) {
589        true
590    }
591
592    type E = PageTableEntry;
593
594    type C = PagingConsts;
595
596    type Item = MappedItem;
597
598    uninterp spec fn item_into_raw_spec(item: Self::Item) -> (Paddr, PagingLevel, PageProperty);
599
600    #[verifier::external_body]
601    fn item_into_raw(item: Self::Item) -> (Paddr, PagingLevel, PageProperty) {
602        unimplemented!()
603    }
604
605    uninterp spec fn item_from_raw_spec(
606        paddr: Paddr,
607        level: PagingLevel,
608        prop: PageProperty,
609    ) -> Self::Item;
610
611    #[verifier::external_body]
612    fn item_from_raw(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self::Item {
613        unimplemented!()
614    }
615}
616
617type Result<A> = core::result::Result<A, Error>;
618
619#[verus_verify]
620impl<'a> VmSpace<'a> {
621    pub uninterp spec fn new_spec() -> Self;
622
623    pub open spec fn reader_requires(
624        &self,
625        vm_owner: VmSpaceOwner<'a>,
626        vaddr: Vaddr,
627        len: usize,
628    ) -> bool {
629        &&& self.inv()
630        &&& vm_owner.inv_with(*self)
631        &&& vm_owner.inv()
632        &&& vm_owner.active
633        &&& vm_owner.can_create_reader(vaddr, len)
634        &&& vaddr != 0 && len > 0 && vaddr + len <= MAX_USERSPACE_VADDR_SPEC()
635        &&& current_page_table_paddr_spec() == self.pt.root_paddr_spec()
636    }
637
638    pub open spec fn writer_requires(
639        &self,
640        vm_owner: VmSpaceOwner<'a>,
641        vaddr: Vaddr,
642        len: usize,
643    ) -> bool {
644        &&& self.inv()
645        &&& vm_owner.inv_with(*self)
646        &&& vm_owner.inv()
647        &&& vm_owner.active
648        &&& vm_owner.can_create_writer(vaddr, len)
649        &&& vaddr != 0 && len > 0 && vaddr + len <= MAX_USERSPACE_VADDR_SPEC()
650        &&& current_page_table_paddr_spec() == self.pt.root_paddr_spec()
651    }
652
653    pub open spec fn reader_ensures(
654        &self,
655        vm_owner_old: VmSpaceOwner<'_>,
656        vm_owner_new: VmSpaceOwner<'_>,
657        vaddr: Vaddr,
658        len: usize,
659        r: Result<VmReader<'_>>,
660        r_owner: Option<VmIoOwner<'_>>,
661    ) -> bool {
662        &&& vm_owner_new.inv()
663        &&& vm_owner_new.readers == vm_owner_old.readers
664        &&& vm_owner_new.writers == vm_owner_old.writers
665        &&& r matches Ok(reader) && r_owner matches Some(owner) ==> {
666            &&& reader.inv()
667            &&& owner.inv_with_reader(reader)
668            &&& owner.mem_view is None
669        }
670    }
671
672    pub open spec fn writer_ensures(
673        &self,
674        vm_owner_old: VmSpaceOwner<'a>,
675        vm_owner_new: VmSpaceOwner<'a>,
676        vaddr: Vaddr,
677        len: usize,
678        r: Result<VmWriter<'a>>,
679        r_owner: Option<VmIoOwner<'a>>,
680    ) -> bool {
681        &&& vm_owner_new.inv()
682        &&& vm_owner_new.readers == vm_owner_old.readers
683        &&& vm_owner_new.writers == vm_owner_old.writers
684        &&& r matches Ok(writer) && r_owner matches Some(owner) ==> {
685            &&& writer.inv()
686            &&& owner.inv_with_writer(writer)
687            &&& owner.mem_view is None
688        }
689    }
690
691    /// Creates a new VM address space.
692    #[inline]
693    #[verifier::external_body]
694    #[verifier::when_used_as_spec(new_spec)]
695    #[verus_spec(r =>
696        ensures
697            r == Self::new_spec(),
698            r.inv(),
699    )]
700    pub fn new() -> Self {
701        unimplemented!()
702    }
703
704    /// Gets an immutable cursor in the virtual address range.
705    ///
706    /// The cursor behaves like a lock guard, exclusively owning a sub-tree of
707    /// the page table, preventing others from creating a cursor in it. So be
708    /// sure to drop the cursor as soon as possible.
709    ///
710    /// The creation of the cursor may block if another cursor having an
711    /// overlapping range is alive.
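    ///
    /// A minimal query sketch, assuming a hypothetical atomic-mode `guard` and an
    /// already-locked range (ghost and tracked arguments are elided; not compiled):
    ///
    /// ```rust,ignore
    /// let mut cursor = vm_space.cursor(&guard, &(0x1000..0x5000))?;
    /// let (range, item) = cursor.query()?;
    /// ```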
712    #[verifier::external_body]
713    pub fn cursor<G: InAtomicMode>(&'a self, guard: &'a G, va: &Range<Vaddr>) -> Result<
714        Cursor<'a, G>,
715    > {
716        Ok(self.pt.cursor(guard, va).map(|pt_cursor| Cursor(pt_cursor.0))?)
717    }
718
    /// Gets a mutable cursor in the virtual address range.
720    ///
721    /// The same as [`Self::cursor`], the cursor behaves like a lock guard,
722    /// exclusively owning a sub-tree of the page table, preventing others
723    /// from creating a cursor in it. So be sure to drop the cursor as soon as
724    /// possible.
725    ///
    /// The creation of the cursor may block if another cursor having an
    /// overlapping range is alive. The modification to the mapping by this
    /// cursor may also block, or be overridden by, the mapping of another cursor.
729    pub fn cursor_mut<G: InAtomicMode>(&'a self, guard: &'a G, va: &Range<Vaddr>) -> Result<
730        CursorMut<'a, G>,
731    > {
732        Ok(
733            self.pt.cursor_mut(guard, va).map(
734                |pt_cursor|
735                    CursorMut {
736                        pt_cursor:
737                            pt_cursor.0,
738                        //            flusher: TlbFlusher::new(&self.cpus, disable_preempt()),
739                    },
740            )?,
741        )
742    }
743
744    /// Activates the given reader to read data from the user space of the current task.
745    #[inline(always)]
746    #[verus_spec(r =>
747        with
748            Tracked(vm_space_owner): Tracked<&'a mut VmSpaceOwner<'a>>,
749            Tracked(owner_r): Tracked<&'a mut VmIoOwner<'a>>,
750        requires
751            old(self).inv(),
752            old(vm_space_owner).mem_view matches Some(mv) &&
753                forall |va: usize|
754                #![auto]
755                    old(owner_r).range@.start <= va < old(owner_r).range@.end ==>
756                        mv.addr_transl(va) is Some
757            ,
758            old(vm_space_owner).inv_with(*old(self)),
759            old(vm_space_owner).inv(),
760            old(vm_space_owner).active,
761            old(owner_r).inv_with_reader(*reader),
762            old(owner_r).mem_view is None,
763            reader.inv(),
764        ensures
765            self.inv(),
766            self.shared_reader == old(self).shared_reader,
767            self.readers@ == old(self).readers@.push(reader),
768            owner_r.inv_with_reader(*reader),
769            owner_r.mem_view == Some(VmIoMemView::ReadView(&old(vm_space_owner).mem_view@.unwrap().borrow_at_spec(
770                old(owner_r).range@.start,
771                (old(owner_r).range@.end - old(owner_r).range@.start) as usize,
772            ))),
773    )]
774    pub fn activate_reader(&mut self, reader: &'a VmReader<'a>) {
775        self.readers.push(reader);
776
777        proof {
778            let tracked mv = match vm_space_owner.mem_view {
779                Some(ref mv) => mv,
780                _ => { proof_from_false() },
781            };
782            let tracked borrowed_mv = mv.borrow_at(
783                owner_r.range@.start,
784                (owner_r.range@.end - owner_r.range@.start) as usize,
785            );
786
787            owner_r.mem_view = Some(VmIoMemView::ReadView(borrowed_mv));
788
789            assert forall|va: usize|
790                #![auto]
791                owner_r.range@.start <= va < owner_r.range@.end implies borrowed_mv.addr_transl(
792                va,
793            ) is Some by {
794                if owner_r.range@.start <= va && va < owner_r.range@.end {
795                    assert(borrowed_mv.mappings =~= mv.mappings.filter(
796                        |m: Mapping|
797                            m.va_range.start < (owner_r.range@.end) && m.va_range.end
798                                > owner_r.range@.start,
799                    ));
800                    let o_borrow_mv = borrowed_mv.mappings.filter(
801                        |m: Mapping| m.va_range.start <= va < m.va_range.end,
802                    );
803                    let o_mv = mv.mappings.filter(
804                        |m: Mapping| m.va_range.start <= va < m.va_range.end,
805                    );
806                    assert(mv.addr_transl(va) is Some);
807                    assert(o_mv.len() > 0);
808                    assert(o_borrow_mv.len() > 0) by {
809                        let m = o_mv.choose();
810                        assert(o_mv.contains(m)) by {
811                            vstd::set::axiom_set_choose_len(o_mv);
812                        }
813                        assert(o_borrow_mv.contains(m));
814                    }
815                }
816            }
817        }
818    }
819
820    /// Activates the given writer to write data to the user space of the current task.
821    #[inline(always)]
822    #[verus_spec(r =>
823        with
824            Tracked(vm_space_owner): Tracked<&'a mut VmSpaceOwner<'a>>,
825            Tracked(owner_w): Tracked<&'a mut VmIoOwner<'a>>,
826        requires
827            old(self).inv(),
828            old(vm_space_owner).mem_view matches Some(mv) &&
829                forall |va: usize|
830                #![auto]
831                    old(owner_w).range@.start <= va < old(owner_w).range@.end ==>
832                        mv.addr_transl(va) is Some
833            ,
834            old(vm_space_owner).inv_with(*old(self)),
835            old(vm_space_owner).inv(),
836            old(vm_space_owner).active,
837            old(owner_w).inv_with_writer(*writer),
838            old(owner_w).mem_view is None,
839            writer.inv(),
840        ensures
841            self.inv(),
842            self.shared_reader == old(self).shared_reader,
843            self.writers@ == old(self).writers@.push(writer),
844            owner_w.inv_with_writer(*writer),
845            owner_w.mem_view == Some(VmIoMemView::WriteView(old(vm_space_owner).mem_view@.unwrap().split_spec(
846                old(owner_w).range@.start,
847                (old(owner_w).range@.end - old(owner_w).range@.start) as usize,
848            ).0)),
849    )]
850    pub fn activate_writer(&mut self, writer: &'a VmWriter<'a>) {
851        self.writers.push(writer);
852
853        proof {
854            let tracked mut mv = vm_space_owner.mem_view.tracked_take();
855            let ghost old_mv = mv;
856            let tracked (lhs, rhs) = mv.split(
857                owner_w.range@.start,
858                (owner_w.range@.end - owner_w.range@.start) as usize,
859            );
860
861            owner_w.mem_view = Some(VmIoMemView::WriteView(lhs));
862            vm_space_owner.mem_view = Some(rhs);
863
864            assert forall|va: usize|
865                #![auto]
866                owner_w.range@.start <= va < owner_w.range@.end implies lhs.addr_transl(va) is Some by {
867                if owner_w.range@.start <= va && va < owner_w.range@.end {
868                    assert(lhs.mappings =~= old_mv.mappings.filter(
869                        |m: Mapping|
870                            m.va_range.start < (owner_w.range@.end) && m.va_range.end
871                                > owner_w.range@.start,
872                    ));
873                    let o_lhs = lhs.mappings.filter(
874                        |m: Mapping| m.va_range.start <= va < m.va_range.end,
875                    );
876                    let o_mv = old_mv.mappings.filter(
877                        |m: Mapping| m.va_range.start <= va < m.va_range.end,
878                    ); 
879
880                    assert(old_mv.addr_transl(va) is Some);
881                    assert(o_mv.len() > 0);
882                    assert(o_lhs.len() > 0) by {
883                        broadcast use vstd::set::axiom_set_choose_len;
884
885                        let m = o_mv.choose();
886                        assert(o_mv.contains(m));
887                        assert(m.va_range.start <= va < m.va_range.end);
888                        assert(o_lhs.contains(m));
889                    }
890                }
891            }
892        }
893    }
894
895    /// Creates a reader to read data from the user space of the current task.
896    ///
    /// Returns [`Err`] if this [`VmSpace`] does not belong to the user space of the current task
    /// or if `vaddr` and `len` do not represent a user space memory range.
899    ///
900    /// Users must ensure that no other page table is activated in the current task during the
901    /// lifetime of the created [`VmReader`]. This guarantees that the [`VmReader`] can operate
902    /// correctly.
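    ///
    /// A minimal sketch, assuming hypothetical `uaddr` and `len` that satisfy
    /// `reader_requires` (ghost and tracked arguments are elided; not compiled):
    ///
    /// ```rust,ignore
    /// let reader = vm_space.create_reader(uaddr, len)?;
    /// // Register the reader with the VM space so its memory view can be tracked.
    /// vm_space.activate_reader(&reader);
    /// ```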
903    #[inline]
904    #[verus_spec(r =>
905        with
906            Tracked(owner): Tracked<&'a mut VmSpaceOwner<'a>>,
907                -> vm_reader_owner: Tracked<Option<VmIoOwner<'a>>>,
908        requires
909            self.reader_requires(*old(owner), vaddr, len),
910        ensures
911            self.reader_ensures(*old(owner), *owner, vaddr, len, r, vm_reader_owner@),
912    )]
913    pub fn create_reader(&self, vaddr: Vaddr, len: usize) -> Result<VmReader<'a>> {
914        let vptr = VirtPtr::from_vaddr(vaddr, len);
915        let ghost id = owner.new_vm_io_id();
916        proof_decl! {
917            let tracked mut vm_reader_owner;
918        }
        // SAFETY: The memory range is in user space, as required by `reader_requires`.
920        let reader = unsafe {
921            proof_with!(Ghost(id) => Tracked(vm_reader_owner));
922            VmReader::from_user_space(vptr)
923        };
924
925        proof_with!(|= Tracked(Some(vm_reader_owner)));
926        Ok(reader)
927    }
928
    /// Creates a writer to write data into the user space of the current task.
    ///
    /// Returns `Err` if this `VmSpace` does not belong to the user space of the current task
    /// or if `vaddr` and `len` do not represent a user space memory range.
931    ///
932    /// Users must ensure that no other page table is activated in the current task during the
933    /// lifetime of the created `VmWriter`. This guarantees that the `VmWriter` can operate correctly.
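    ///
    /// A minimal sketch, assuming hypothetical `uaddr` and `len` that satisfy
    /// `writer_requires` (ghost and tracked arguments are elided; not compiled). The
    /// returned writer can afterwards be registered with [`VmSpace::activate_writer`]:
    ///
    /// ```rust,ignore
    /// let writer = vm_space.create_writer(uaddr, len)?;
    /// ```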
934    #[inline]
935    #[verifier::external_body]
936    #[verus_spec(r =>
937        with
938            Tracked(owner): Tracked<&mut VmSpaceOwner<'a>>,
939                -> new_owner: Tracked<Option<VmIoOwner<'a>>>,
940        requires
941            self.writer_requires(*old(owner), vaddr, len),
942        ensures
943            self.writer_ensures(*old(owner), *owner, vaddr, len, r, new_owner@),
944    )]
945    pub fn create_writer(self, vaddr: Vaddr, len: usize) -> Result<VmWriter<'a>> {
946        let vptr = VirtPtr::from_vaddr(vaddr, len);
947        let ghost id = owner.new_vm_io_id();
948        proof_decl! {
949            let tracked mut vm_writer_owner;
950        }
951
952        // `VmWriter` is neither `Sync` nor `Send`, so it will not live longer than the current
953        // task. This ensures that the correct page table is activated during the usage period of
954        // the `VmWriter`.
955        //
        // SAFETY: The memory range is in user space, as required by `writer_requires`.
957        let writer = unsafe {
958            proof_with!(Ghost(id) => Tracked(vm_writer_owner));
959            VmWriter::from_user_space(vptr)
960        };
961
962        proof_with!(|= Tracked(Some(vm_writer_owner)));
963        Ok(writer)
964    }
965}
966
967/*
968impl Default for VmSpace {
969    fn default() -> Self {
970        Self::new()
971    }
972}
973*/
974
975/// The cursor for querying over the VM space without modifying it.
976///
977/// It exclusively owns a sub-tree of the page table, preventing others from
/// reading or modifying the same sub-tree. Two read-only cursors cannot be
/// created from the same virtual address range either.
980pub struct Cursor<'a, A: InAtomicMode>(pub crate::mm::page_table::Cursor<'a, UserPtConfig, A>);
981
982/*
983impl<A: InAtomicMode> Iterator for Cursor<'_, A> {
984    type Item = (Range<Vaddr>, Option<MappedItem>);
985
986    fn next(&mut self) -> Option<Self::Item> {
987        self.0.next()
988    }
989}
990*/
991
992#[verus_verify]
993impl<'rcu, A: InAtomicMode> Cursor<'rcu, A> {
994    
995    pub open spec fn invariants(
996        self,
997        owner: CursorOwner<'rcu, UserPtConfig>,
998        regions: MetaRegionOwners,
999        guards: Guards<'rcu, UserPtConfig>,
1000    ) -> bool {
1001        &&& self.0.inv()
1002        &&& self.0.wf(owner)
1003        &&& owner.inv()
1004        &&& regions.inv()
1005        &&& owner.children_not_locked(guards)
1006        &&& owner.nodes_locked(guards)
1007        &&& owner.relate_region(regions)
1008        &&& !owner.popped_too_high
1009    }
1010
1011    pub open spec fn query_success_requires(self) -> bool {
1012        self.0.barrier_va.start <= self.0.va < self.0.barrier_va.end
1013    }
1014
1015    pub open spec fn query_success_ensures(
1016        self,
1017        view: CursorView<UserPtConfig>,
1018        range: Range<Vaddr>,
1019        item: Option<MappedItem>
1020    ) -> bool {
1021        if view.present() {
1022            let found_item = view.query_item_spec();
1023            &&& range.start == found_item.va_range.start
1024            &&& range.end == found_item.va_range.end
1025            &&& item is Some
1026            &&& meta_to_frame(item.unwrap().frame.ptr.addr()) == found_item.pa_range.start
1027        } else {
1028            &&& range.start == self.0.va
1029            &&& item is None
1030        }
1031    }
1032
1033    /// Queries the mapping at the current virtual address.
1034    ///
1035    /// If the cursor is pointing to a valid virtual address that is locked,
1036    /// it will return the virtual address range and the mapped item.
1037    /// ## Preconditions
1038    /// In addition to the standard well-formedness conditions, the function will give an error
1039    /// if the cursor is outside of the locked range.
1040    /// ## Postconditions
1041    /// If the cursor is valid, the result of the lookup is given by [`query_success_ensures`](Self::query_success_ensures).
1042    /// The mapping that is returned corresponds to the abstract mapping given by [`query_item_spec`](CursorView::query_item_spec).
1043    #[verus_spec(r =>
1044        with Tracked(owner): Tracked<&mut CursorOwner<'rcu, UserPtConfig>>,
1045            Tracked(regions): Tracked<&mut MetaRegionOwners>,
1046            Tracked(guards): Tracked<&mut Guards<'rcu, UserPtConfig>>
1047        requires
1048            old(self).invariants(*old(owner), *old(regions), *old(guards))
1049        ensures
1050            self.invariants(*owner, *regions, *guards),
1051            self.query_success_requires() ==> {
1052                &&& r is Ok
1053                &&& self.query_success_ensures(self.0.model(*owner), r.unwrap().0, r.unwrap().1)
1054            }
1055    )]
1056    pub fn query(&mut self) -> Result<(Range<Vaddr>, Option<MappedItem>)>
1057    {
1058        proof { admit() };
1059        Ok(
1060            #[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
1061            self.0.query()?,
1062        )
1063    }
1064
1065    /// Moves the cursor forward to the next mapped virtual address.
1066    ///
    /// If there is a mapped virtual address following the current address within
    /// the next `len` bytes, it will return that mapped address. In this case,
    /// the cursor will stop at the mapped address.
    ///
    /// Otherwise, it will return `None`, and the cursor may stop at any
    /// address after `len` bytes.
1073    ///
1074    /// # Panics
1075    ///
1076    /// Panics if the length is longer than the remaining range of the cursor.
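    ///
    /// A minimal sketch (hypothetical page-aligned `len` within the cursor's remaining
    /// range; ghost and tracked arguments are elided; not compiled):
    ///
    /// ```rust,ignore
    /// if let Some(va) = cursor.find_next(len) {
    ///     // The cursor now stops at `va`, the next mapped address.
    /// }
    /// ```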
1077    #[verus_spec(r =>
1078        with Tracked(owner): Tracked<&mut CursorOwner<'rcu, UserPtConfig>>,
1079            Tracked(regions): Tracked<&mut MetaRegionOwners>,
1080            Tracked(guards): Tracked<&mut Guards<'rcu, UserPtConfig>>
1081    )]
1082    pub fn find_next(&mut self, len: usize) -> (res: Option<Vaddr>)
1083        requires
1084            old(owner).inv(),
1085            old(self).0.wf(*old(owner)),
1086            old(regions).inv(),
1087            old(self).0.level < old(self).0.guard_level,
1088            old(self).0.inv(),
1089            old(owner).in_locked_range(),
1090            old(owner).children_not_locked(*old(guards)),
1091            old(owner).nodes_locked(*old(guards)),
1092            old(owner).relate_region(*old(regions)),
1093            !old(owner).popped_too_high,
1094            len % PAGE_SIZE() == 0,
1095            old(self).0.va + len <= old(self).0.barrier_va.end,
1096        ensures
1097            owner.inv(),
1098            self.0.wf(*owner),
1099            regions.inv(),
1100    {
1101        #[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
1102        self.0.find_next(len)
1103    }
1104
    /// Jumps to the virtual address.
1106    #[verus_spec(
1107        with Tracked(owner): Tracked<&mut CursorOwner<'rcu, UserPtConfig>>,
1108            Tracked(regions): Tracked<&mut MetaRegionOwners>,
1109            Tracked(guards): Tracked<&mut Guards<'rcu, UserPtConfig>>
1110    )]
1111    pub fn jump(&mut self, va: Vaddr) -> Result<()>
1112        requires
1113            old(owner).inv(),
1114            old(self).0.wf(*old(owner)),
1115            old(self).0.level < old(self).0.guard_level,
1116            old(self).0.inv(),
1117            old(owner).in_locked_range(),
1118            old(owner).children_not_locked(*old(guards)),
1119            old(owner).nodes_locked(*old(guards)),
1120            old(owner).relate_region(*old(regions)),
1121    {
1122        (#[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
1123        self.0.jump(va))?;
1124        Ok(())
1125    }
1126
    /// Gets the virtual address of the current slot.
1128    pub fn virt_addr(&self) -> Vaddr {
1129        self.0.virt_addr()
1130    }
1131}
1132
1133/// The cursor for modifying the mappings in VM space.
1134///
1135/// It exclusively owns a sub-tree of the page table, preventing others from
1136/// reading or modifying the same sub-tree.
1137pub struct CursorMut<'a, A: InAtomicMode> {
1138    pub pt_cursor: crate::mm::page_table::CursorMut<
1139        'a,
1140        UserPtConfig,
1141        A,
1142    >,
1143    // We have a read lock so the CPU set in the flusher is always a superset
1144    // of actual activated CPUs.
1145    //    flusher: TlbFlusher<'a, DisabledPreemptGuard>,
1146}
1147
1148impl<'a, A: InAtomicMode> CursorMut<'a, A> {
    pub open spec fn query_requires(
1150        cursor: Self,
1151        owner: CursorOwner<'a, UserPtConfig>,
1152        guard_perm: vstd::simple_pptr::PointsTo<PageTableGuard<'a, UserPtConfig>>,
1153        regions: MetaRegionOwners,
1154    ) -> bool {
1155        &&& cursor.pt_cursor.inner.wf(owner)
1156        &&& owner.inv()
1157        &&& regions.inv()
1158    }
1159
1160    pub open spec fn query_ensures(
1161        old_cursor: Self,
1162        new_cursor: Self,
1163        owner: CursorOwner<'a, UserPtConfig>,
1164        guard_perm: vstd::simple_pptr::PointsTo<PageTableGuard<'a, UserPtConfig>>,
1165        old_regions: MetaRegionOwners,
1166        new_regions: MetaRegionOwners,
1167        r: Result<(Range<Vaddr>, Option<MappedItem>)>,
1168    ) -> bool {
1169        &&& new_regions.inv()
1170        &&& new_cursor.pt_cursor.inner.wf(owner)
1171    }
1172
1173    /// Queries the mapping at the current virtual address.
1174    ///
1175    /// This is the same as [`Cursor::query`].
1176    ///
1177    /// If the cursor is pointing to a valid virtual address that is locked,
1178    /// it will return the virtual address range and the mapped item.
1179    #[verus_spec(
1180        with Tracked(owner): Tracked<&mut CursorOwner<'a, UserPtConfig>>,
1181            Tracked(regions): Tracked<&mut MetaRegionOwners>,
1182            Tracked(guards): Tracked<&mut Guards<'a, UserPtConfig>>
1183        requires
1184            old(owner).inv(),
1185            old(self).pt_cursor.inner.wf(*old(owner)),
1186            old(regions).inv(),
1187            old(self).pt_cursor.inner.inv(),
1188            old(owner).children_not_locked(*old(guards)),
1189            old(owner).nodes_locked(*old(guards)),
1190            old(owner).relate_region(*old(regions)),
1191            old(owner).in_locked_range(),
1192            !old(owner).popped_too_high,
1193    )]
1194    pub fn query(&mut self) -> Result<(Range<Vaddr>, Option<MappedItem>)> {
1195        Ok(
1196            #[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
1197            self.pt_cursor.query()?,
1198        )
1199    }
1200
1201    /// Moves the cursor forward to the next mapped virtual address.
1202    ///
1203    /// This is the same as [`Cursor::find_next`].
1204    #[verus_spec(res =>
1205        with Tracked(owner): Tracked<&mut CursorOwner<'a, UserPtConfig>>,
1206            Tracked(regions): Tracked<&mut MetaRegionOwners>,
1207            Tracked(guards): Tracked<&mut Guards<'a, UserPtConfig>>
1208    )]
1209    pub fn find_next(&mut self, len: usize) -> (res: Option<Vaddr>)
1210        requires
1211            old(owner).inv(),
1212            old(self).pt_cursor.inner.wf(*old(owner)),
1213            old(regions).inv(),
1214            old(self).pt_cursor.inner.level < old(self).pt_cursor.inner.guard_level,
1215            old(self).pt_cursor.inner.inv(),
1216            old(owner).in_locked_range(),
1217            old(owner).children_not_locked(*old(guards)),
1218            old(owner).nodes_locked(*old(guards)),
1219            old(owner).relate_region(*old(regions)),
1220            !old(owner).popped_too_high,
1221            len % PAGE_SIZE() == 0,
1222            old(self).pt_cursor.inner.va + len <= old(self).pt_cursor.inner.barrier_va.end,
1223        ensures
1224            owner.inv(),
1225            self.pt_cursor.inner.wf(*owner),
1226            regions.inv(),
1227    {
1228        #[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
1229        self.pt_cursor.find_next(len)
1230    }
1231
    /// Jumps to the virtual address.
1233    ///
1234    /// This is the same as [`Cursor::jump`].
1235    #[verus_spec(r =>
1236        with
1237            Tracked(owner): Tracked<&mut CursorOwner<'a, UserPtConfig>>,
1238            Tracked(regions): Tracked<&mut MetaRegionOwners>,
1239            Tracked(guards): Tracked<&mut Guards<'a, UserPtConfig>>
1240        requires
1241            old(owner).inv(),
1242            old(self).pt_cursor.inner.wf(*old(owner)),
1243            old(self).pt_cursor.inner.level < old(self).pt_cursor.inner.guard_level,
1244            old(self).pt_cursor.inner.inv(),
1245    )]
1246    pub fn jump(&mut self, va: Vaddr) -> Result<()> {
1247        (#[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
1248        self.pt_cursor.jump(va))?;
1249        Ok(())
1250    }
1251
    /// Gets the virtual address of the current slot.
1253    #[verus_spec(r =>
1254        returns
1255            self.pt_cursor.inner.va,
1256    )]
1257    pub fn virt_addr(&self) -> Vaddr {
1258        self.pt_cursor.virt_addr()
1259    }
1260
1261    /* TODO: come back after TLB
1262    /// Get the dedicated TLB flusher for this cursor.
1263    pub fn flusher(&mut self) -> &mut TlbFlusher<'a, DisabledPreemptGuard> {
1264        &mut self.flusher
1265    }
1266    */
1267
1268    /// Collects the invariants of the cursor, its owner, and associated tracked structures.
1269    /// The cursor must be well-formed with respect to its owner. This will hold before and after the call to `map`.
1270    pub open spec fn map_cursor_inv(self, cursor_owner: CursorOwner<'a, UserPtConfig>, guards: Guards<'a, UserPtConfig>, regions: MetaRegionOwners) -> bool {
1271        &&& cursor_owner.inv()
1272        &&& self.pt_cursor.inner.wf(cursor_owner)
1273        &&& self.pt_cursor.inner.inv()
1274        &&& cursor_owner.children_not_locked(guards)
1275        &&& cursor_owner.nodes_locked(guards)
1276        &&& cursor_owner.relate_region(regions)
1277        &&& !cursor_owner.popped_too_high
1278        &&& regions.inv()
1279    }
1280
1281    /// These conditions must hold before the call to `map` but may not be maintained after the call.
1282    /// The cursor must be within the locked range and below the guard level, but it may move outside the
1283    /// range if the frame being mapped is exactly the length of the remaining range.
1284    pub open spec fn map_cursor_requires(self, cursor_owner: CursorOwner<'a, UserPtConfig>) -> bool {
1285        &&& cursor_owner.in_locked_range()
1286        &&& self.pt_cursor.inner.level < self.pt_cursor.inner.guard_level
1287        &&& self.pt_cursor.inner.va < self.pt_cursor.inner.barrier_va.end
1288    }
1289
1290    /// Collects the conditions that must hold on the frame being mapped.
1291    /// The frame must be well-formed with respect to the entry owner. When converted into a `MappedItem`,
1292    /// its physical address must also match, and its level must be between 1 and the highest translation level.
1293    pub open spec fn map_item_requires(self, frame: UFrame, prop: PageProperty, entry_owner: EntryOwner<UserPtConfig>) -> bool {
1294        let item = MappedItem { frame: frame, prop: prop };
1295        let (paddr, level, prop0) = UserPtConfig::item_into_raw_spec(item);
1296        &&& prop == prop0
1297        &&& entry_owner.frame.unwrap().mapped_pa == paddr
1298        &&& entry_owner.frame.unwrap().prop == prop
1299        &&& level <= UserPtConfig::HIGHEST_TRANSLATION_LEVEL()
1300        &&& 1 <= level <= NR_LEVELS() // Should be property of item_into_raw
1301        &&& Child::Frame(paddr, level, prop0).wf(entry_owner)
1302        &&& self.pt_cursor.inner.va + page_size(level) <= self.pt_cursor.inner.barrier_va.end
1303        &&& entry_owner.inv()
1304        &&& self.pt_cursor.inner.va % page_size(level) == 0
1305    }
1306
1307    /// Describes the result of a call to `map`: the new cursor view is the old view updated, via [`map_spec`](CursorView::map_spec), with a mapping
1308    /// of the frame at the cursor's current virtual address, replacing the previous mapping at that address (if any).
1309    pub open spec fn map_item_ensures(
1310        self,
1311        frame: UFrame,
1312        prop: PageProperty,
1313        old_cursor_view: CursorView<UserPtConfig>,
1314        cursor_view: CursorView<UserPtConfig>,
1315    ) -> bool {
1316        let item = MappedItem { frame: frame, prop: prop };
1317        let (paddr, level, prop0) = UserPtConfig::item_into_raw_spec(item);
1318        cursor_view == old_cursor_view.map_spec(paddr, page_size(level), prop)
1319    }
1320
1321    /// Map a frame into the current slot.
1322    ///
1323    /// This method will bring the cursor to the next slot after the modification.
1324    /// ## Preconditions
1325    /// The cursor must be within the locked range and below the guard level, and the frame must fit within the remaining range of the cursor.
1326    /// The cursor must satisfy all invariants, and the frame must be well-formed when converted into a `MappedItem` ([`map_item_requires`](Self::map_item_requires)).
1327    /// ## Postconditions
1328    /// After the call, the cursor will satisfy all invariants, and will map the frame into the current slot according to [`map_spec`](CursorView::map_spec).
1329    /// After the call, the TLB will not contain any entries for the virtual address range being mapped (TODO).
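    ///
    /// ## Example
    /// A minimal usage sketch (hypothetical caller context; the tracked ghost
    /// arguments required by the verified interface, the frame allocation, and
    /// `page_aligned_va` are assumptions and are not shown):
    /// ```ignore
    /// // `frame: UFrame` comes from the frame allocator and `prop: PageProperty`
    /// // describes the desired user-space permissions (both assumed to exist).
    /// cursor.jump(page_aligned_va)?;
    /// cursor.map(frame, prop); // maps one page and advances the cursor to the next slot
    /// ```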
1330    #[verus_spec(
1331        with Tracked(cursor_owner): Tracked<&mut CursorOwner<'a, UserPtConfig>>,
1332            Tracked(entry_owner): Tracked<EntryOwner<UserPtConfig>>,
1333            Tracked(regions): Tracked<&mut MetaRegionOwners>,
1334            Tracked(guards): Tracked<&mut Guards<'a, UserPtConfig>>,
1335    )]
1336    pub fn map(&mut self, frame: UFrame, prop: PageProperty)
1337        requires
1338            old(self).map_cursor_requires(*old(cursor_owner)),
1339            old(self).map_cursor_inv(*old(cursor_owner), *old(guards), *old(regions)),
1340            old(self).map_item_requires(frame, prop, entry_owner),
1341        ensures
1342            self.map_cursor_inv(*cursor_owner, *guards, *regions),
1343            old(self).map_item_ensures(frame, prop, old(self).pt_cursor.inner.model(*old(cursor_owner)), self.pt_cursor.inner.model(*cursor_owner)),
1344    {
1345        let start_va = self.virt_addr();
1346        let item = MappedItem { frame: frame, prop: prop };
1347
1348        // SAFETY: It is safe to map untyped memory into the userspace.
1349        let Err(frag) = (
1350        #[verus_spec(with Tracked(cursor_owner), Tracked(entry_owner), Tracked(regions), Tracked(guards))]
1351        self.pt_cursor.map(item)) else {
1352            return ;  // No mapping exists at the current address.
1353        };
1354
1355        /*        match frag {
1356            PageTableFrag::Mapped { va, item } => {
1357                debug_assert_eq!(va, start_va);
1358                let old_frame = item.frame;
1359                self.flusher
1360                    .issue_tlb_flush_with(TlbFlushOp::Address(start_va), old_frame.into());
1361                self.flusher.dispatch_tlb_flush();
1362            }
1363            PageTableFrag::StrayPageTable { .. } => {
1364                panic!("`UFrame` is base page sized but re-mapping out a child PT");
1365            }
1366        }
1367*/
1368    }
1369
1370    /// Clears the mapping starting from the current slot,
1371    /// and returns the number of unmapped pages.
1372    ///
1373    /// This method will bring the cursor forward by `len` bytes in the virtual
1374    /// address space after the modification.
1375    ///
1376    /// Already-absent mappings encountered by the cursor will be skipped. It
1377    /// is valid to unmap a range that is not mapped.
1378    ///
1379    /// This method must issue and dispatch a TLB flush after the operation;
1380    /// otherwise, memory safety would be compromised. Since TLB flushes are
1381    /// costly, prefer calling this function once with a large `len` over
1382    /// splitting the operation into multiple small calls.
1383    ///
1384    /// # Panics
1385    /// Panics if:
1386    ///  - the length is longer than the remaining range of the cursor;
1387    ///  - the length is not page-aligned.
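    ///
    /// ## Example
    /// A minimal usage sketch (the starting address and page count are
    /// hypothetical; the range must already be covered by this cursor):
    /// ```ignore
    /// cursor.jump(start_va)?;               // `start_va` is page-aligned
    /// let n = cursor.unmap(16 * PAGE_SIZE); // clear 16 pages in one call
    /// assert!(n <= 16);                     // already-absent pages are skipped
    /// ```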
1388    #[verus_spec(r =>
1389        requires
1390            len % PAGE_SIZE() == 0,
1391            old(self).pt_cursor.inner.va % PAGE_SIZE() == 0,
1392            old(self).pt_cursor.inner.va + len <= KERNEL_VADDR_RANGE().end as int,
1393        ensures
1394    )]
1395    #[verifier::external_body]
1396    pub fn unmap(&mut self, len: usize) -> usize {
1397        let end_va = self.virt_addr() + len;
1398        let mut num_unmapped: usize = 0;
1399
1400        proof {
1401            assert((self.pt_cursor.inner.va + len) % PAGE_SIZE() as int == 0) by (compute);
1402        }
1403
1404        #[verus_spec(
1405            invariant_except_break
1406                self.pt_cursor.inner.va % PAGE_SIZE() == 0,
1407                end_va % PAGE_SIZE() == 0,
1408        )]
1409        loop {
1410            // SAFETY: It is safe to un-map memory in the userspace.
1411            let Some(frag) = (unsafe { self.pt_cursor.take_next(end_va - self.virt_addr()) }) else {
1412                break ;  // No more mappings in the range.
1413            };
1414
1415            match frag {
1416                PageTableFrag::Mapped { va, item, .. } => {
1417                    let frame = item.frame;
1418                    num_unmapped += 1;
1419                    //                    self.flusher
1420                    //                        .issue_tlb_flush_with(TlbFlushOp::Address(va), frame.into());
1421                },
1422                PageTableFrag::StrayPageTable { pt, va, len, num_frames } => {
1423                    num_unmapped += num_frames;
1424                    //                    self.flusher
1425                    //                        .issue_tlb_flush_with(TlbFlushOp::Range(va..va + len), pt);
1426                },
1427            }
1428        }
1429
1430        //        self.flusher.dispatch_tlb_flush();
1431
1432        num_unmapped
1433    }
1434
1435    /// Applies the operation to the next mapped slot within the range.
1436    ///
1437    /// The range searched starts at the current virtual address and spans
1438    /// the provided length.
1439    ///
1440    /// The function stops and yields the actually protected range as soon as
1441    /// it has protected a page, regardless of whether the following pages
1442    /// also need to be protected.
1443    ///
1444    /// It also moves the cursor forward to the next page after the
1445    /// protected one. If no mapped pages exist in the following range, the
1446    /// cursor will stop at the end of the range and return [`None`].
1447    ///
1448    /// Note that it will **NOT** flush the TLB after the operation. It is up
1449    /// to the caller to decide when and how to flush the TLB using
1450    /// [`Self::flusher`].
1451    ///
1452    /// # Panics
1453    ///
1454    /// Panics if the length is longer than the remaining range of the cursor.
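    ///
    /// ## Example
    /// A minimal usage sketch (hypothetical caller; assumes `PageProperty` exposes
    /// a `flags` field of `PageFlags`, as in the unverified OSTD):
    /// ```ignore
    /// let end = cursor.virt_addr() + len;
    /// while cursor.virt_addr() < end {
    ///     let remaining = end - cursor.virt_addr();
    ///     // Revoke write permission on the next mapped page, if any.
    ///     let protected = cursor.protect_next(remaining, |mut prop| {
    ///         prop.flags -= PageFlags::W;
    ///         prop
    ///     });
    ///     if protected.is_none() {
    ///         break; // no more mapped pages in the range
    ///     }
    /// }
    /// // Remember to flush the TLB for the protected range afterwards.
    /// ```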
1455    #[verus_spec(r =>
1456        with Tracked(owner): Tracked<&mut CursorOwner<'a, UserPtConfig>>,
1457            Tracked(regions): Tracked<&mut MetaRegionOwners>,
1458            Tracked(guards): Tracked<&mut Guards<'a, UserPtConfig>>,
1459        requires
1460            old(regions).inv(),
1461            old(owner).inv(),
1462            !old(owner).cur_entry_owner().is_absent(),
1463            old(self).pt_cursor.inner.wf(*old(owner)),
1464            old(self).pt_cursor.inner.inv(),
1465            old(owner).in_locked_range(),
1466            !old(owner).popped_too_high,
1467            old(owner).children_not_locked(*old(guards)),
1468            old(owner).nodes_locked(*old(guards)),
1469            old(owner).relate_region(*old(regions)),
1470            len % PAGE_SIZE() == 0,
1471            old(self).pt_cursor.inner.level < NR_LEVELS(),
1472            old(self).pt_cursor.inner.va + len <= old(self).pt_cursor.inner.barrier_va.end,
1473        ensures
1474    )]
1475    pub fn protect_next(
1476        &mut self,
1477        len: usize,
1478        op: impl FnOnce(PageProperty) -> PageProperty,
1479    ) -> Option<Range<Vaddr>> {
1480        // SAFETY: It is safe to protect memory in the userspace.
1481        unsafe {
1482            #[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
1483            self.pt_cursor.protect_next(len, op)
1484        }
1485    }
1486}
1487
1488/*cpu_local_cell! {
1489    /// The `Arc` pointer to the activated VM space on this CPU. If the pointer
1490    /// is NULL, it means that the activated page table is merely the kernel
1491    /// page table.
1492    // TODO: If we are enabling ASID, we need to maintain the TLB state of each
1493    // CPU, rather than merely the activated `VmSpace`. When ASID is enabled,
1494    // the non-active `VmSpace`s can still have their TLB entries in the CPU!
1495    static ACTIVATED_VM_SPACE: *const VmSpace = core::ptr::null();
1496}*/
1497/*#[cfg(ktest)]
1498pub(super) fn get_activated_vm_space() -> *const VmSpace {
1499    ACTIVATED_VM_SPACE.load()
1500}*/
1501} // verus!