ostd/mm/
vm_space.rs

1// SPDX-License-Identifier: MPL-2.0
2//! Virtual memory space management.
3//!
4//! The [`VmSpace`] struct is provided to manage the virtual memory space of a
//! user. Cursors are used to traverse and modify the virtual memory space
6//! concurrently. The VM space cursor [`self::Cursor`] is just a wrapper over
7//! the page table cursor, providing efficient, powerful concurrent accesses
8//! to the page table.
9use alloc::vec::Vec;
10use vstd::pervasive::{arbitrary, proof_from_false};
11use vstd::prelude::*;
12
13use crate::specs::mm::virt_mem_newer::{MemView, VirtPtr};
14
15use crate::error::Error;
16use crate::mm::frame::untyped::UFrame;
17use crate::mm::io::VmIoMemView;
18use crate::mm::page_table::*;
19use crate::mm::page_table::{EntryOwner, PageTableFrag, PageTableGuard};
20use crate::specs::arch::*;
21use crate::specs::mm::frame::mapping::meta_to_frame;
22use crate::specs::mm::frame::meta_region_owners::MetaRegionOwners;
23use crate::specs::mm::page_table::cursor::owners::CursorOwner;
24use crate::specs::mm::page_table::*;
25use crate::specs::mm::tlb::TlbModel;
26use crate::specs::task::InAtomicMode;
27use core::marker::PhantomData;
28use core::{ops::Range, sync::atomic::Ordering};
29use vstd_extra::ghost_tree::*;
30
31use crate::mm::tlb::*;
32use crate::specs::mm::cpu::AtomicCpuSet;
33
34use crate::{
35    mm::{
36        io::{VmIoOwner, VmReader, VmWriter},
37        page_prop::PageProperty,
38        Paddr, PagingConstsTrait, PagingLevel, Vaddr, MAX_USERSPACE_VADDR,
39    },
40    prelude::*,
41};
42
43use alloc::sync::Arc;
44
45#[path = "../../specs/mm/vm_space.rs"]
46pub mod vm_space_specs;
47use vm_space_specs::*;
48
49verus! {
50
/// A virtual address space for user-mode tasks, enabling safe manipulation of user-space memory.
///
/// The [`VmSpace`] type provides memory isolation guarantees between user-space and
/// kernel-space. For example, given an arbitrary user-space pointer, one can read and
/// write the memory location referred to by the user-space pointer without the risk of
/// breaking the memory safety of the kernel space.
///
/// # Task Association Semantics
///
/// As far as OSTD is concerned, a [`VmSpace`] is not necessarily associated with a task. Once a
/// [`VmSpace`] is activated (see [`VmSpace::activate`]), it remains activated until another
/// [`VmSpace`] is activated **possibly by another task running on the same CPU**.
///
/// This means that it's up to the kernel to ensure that a task's [`VmSpace`] is always activated
/// while the task is running. This can be done by using the injected post schedule handler
/// (see [`inject_post_schedule_handler`]) to always activate the correct [`VmSpace`] after each
/// context switch.
///
/// If the kernel otherwise decides not to ensure that the running task's [`VmSpace`] is always
/// activated, the kernel must deal with race conditions when calling methods that require the
/// [`VmSpace`] to be activated, e.g., [`UserMode::execute`], [`VmReader`] and [`VmWriter`].
/// Otherwise, the behavior is unspecified, though it's guaranteed _not_ to compromise the kernel's
/// memory safety.
///
/// # Memory Backing
///
/// A newly-created [`VmSpace`] is not backed by any physical memory pages. To
/// provide memory pages for a [`VmSpace`], one can allocate and map physical
/// memory ([`UFrame`]s) to the [`VmSpace`] using the cursor.
///
/// A [`VmSpace`] can also attach a page fault handler, which will be invoked to
/// handle page faults generated from user space.
///
/// [`inject_post_schedule_handler`]: crate::task::inject_post_schedule_handler
/// [`UserMode::execute`]: crate::user::UserMode::execute
/// [`VmWriter`]: crate::mm::io::VmWriter
/// [`VmReader`]: crate::mm::io::VmReader
///
/// # Verification Design
///
/// A [`VmSpace`] has a corresponding [`VmSpaceOwner`] object that is used to track its state,
/// and against which its invariants are stated. The [`VmSpaceOwner`] catalogues the readers and writers
/// that are associated with the [`VmSpace`], and the [`MemView`] which encodes the active page table and
/// the subset of the TLB that covers the same virtual address space.
/// All proofs about the correctness of the readers and writers are founded on the well-formedness of the [`MemView`]:
///
/// ```rust
/// open spec fn mem_view_wf(self) -> bool {
///    &&& self.mem_view is Some <==> self.mv_range@ is Some
///    // This requires that TotalMapping (mvv) = mv ∪ writer mappings ∪ reader mappings
///    &&& self.mem_view matches Some(remaining_view)
///        ==> self.mv_range@ matches Some(total_view)
///        ==> {
///        &&& remaining_view.mappings_are_disjoint()
///        &&& remaining_view.mappings.finite()
///        &&& total_view.mappings_are_disjoint()
///        &&& total_view.mappings.finite()
///        // ======================
///        // Remaining Consistency
///        // ======================
///        &&& remaining_view.mappings.subset_of(total_view.mappings)
///        &&& remaining_view.memory.dom().subset_of(
///            total_view.memory.dom(),
///        )
///        // =====================
///        // Total View Consistency
///        // =====================
///        &&& forall|va: usize|
///            remaining_view.addr_transl(va) == total_view.addr_transl(
///                va,
///            )
///        // =====================
///        // Writer correctness
///        // =====================
///        &&& forall|i: int|
///            0 <= i < self.writers.len() as int ==> {
///                &&& self.writers[i].inv()
///            }
///        }
///    }
/// }
/// ```
pub struct VmSpace<'a> {
    /// The page table backing this address space.
    pub pt: PageTable<UserPtConfig>,
    /// CPU set consulted by [`CursorMut`]'s TLB flusher when mappings are
    /// modified; kept as a superset of the CPUs that actually activated this
    /// space (see the note on [`CursorMut`]).
    pub cpus: AtomicCpuSet,
    /// Carries the `'a` lifetime; stores no runtime data.
    pub _marker: PhantomData<&'a ()>,
}
137
/// A module-local [`Result`] whose error type is this crate's [`Error`].
type Result<A> = core::result::Result<A, Error>;
139
#[verus_verify]
impl<'a> VmSpace<'a> {
    /// A spec function to create a new [`VmSpace`] instance.
    ///
    /// The reason why this function is marked as `uninterp` is that the implementation details
    /// of the [`VmSpace`] struct are not important for the verification of its clients.
    pub uninterp spec fn new_spec() -> Self;

    /// Checks the preconditions for creating a reader or writer for the given virtual address range.
    ///
    /// Essentially, this requires that
    ///
    /// - the invariants of the [`VmSpace`] and [`VmSpaceOwner`] hold
    ///   (see [`VmSpaceOwner::inv_with`] and [`VmSpaceOwner::inv`]);
    /// - the [`VmSpaceOwner`] is active (via some threads or activation function);
    /// - the [`VmSpaceOwner`] can create a reader or writer for the given virtual address range
    ///   (see [`VmSpaceOwner::can_create_reader`] and [`VmSpaceOwner::can_create_writer`]);
    /// - the virtual address range is valid (non-zero, non-empty, and within user-space limits);
    /// - the currently active page table is the one owned by the [`VmSpace`] (note that this is
    ///   an `uninterp` spec function as this is non-trackable during verification).
    pub open spec fn reader_requires(
        &self,
        vm_owner: VmSpaceOwner<'a>,
        vaddr: Vaddr,
        len: usize,
    ) -> bool {
        &&& vm_owner.inv()
        &&& vm_owner.active
        &&& vm_owner.can_create_reader(vaddr, len)
        &&& vaddr != 0 && len > 0 && vaddr + len <= MAX_USERSPACE_VADDR
        &&& current_page_table_paddr_spec() == self.pt.root_paddr_spec()
    }

    /// Checks the preconditions for creating a writer for the given virtual address range.
    ///
    /// Most of the pre-conditions are the same as those for creating a reader (see
    /// [`Self::reader_requires`]), except that the caller must also have permission to
    /// create a writer for the given virtual address range.
    pub open spec fn writer_requires(
        &self,
        vm_owner: VmSpaceOwner<'a>,
        vaddr: Vaddr,
        len: usize,
    ) -> bool {
        &&& vm_owner.inv()
        &&& vm_owner.active
        &&& vm_owner.can_create_writer(vaddr, len)
        &&& vaddr != 0 && len > 0 && vaddr + len <= MAX_USERSPACE_VADDR
        &&& current_page_table_paddr_spec() == self.pt.root_paddr_spec()
    }

    /// The guarantees of the created reader or writer, assuming the preconditions are satisfied.
    ///
    /// Essentially, this ensures that
    ///
    /// - the invariants of the new [`VmSpace`] and the reader or writer hold;
    /// - the reader or writer is associated with a [`VmIoOwner`] that is well-formed with respect to
    ///   the reader or writer;
    /// - the reader or writer has no memory view, as the memory view will be taken from the
    ///   [`VmSpaceOwner`] when the reader or writer is activated.
    ///
    /// # Special Note
    ///
    /// The newly created instance of [`VmReader`] and its associated [`VmIoOwner`] are not yet
    /// activated, so the guarantees about the memory view only require that the memory view is
    /// [`None`]. The guarantees about the memory view will be provided by the activation function
    /// (see [`VmSpaceOwner::activate_reader`] and [`VmSpaceOwner::activate_writer`]).
    ///
    /// We avoid mixing the creation, usage and deletion into one giant function as this would
    /// create some unnecessary life-cycle management complexities and not really help with the
    /// verification itself.
    pub open spec fn reader_ensures(
        &self,
        vm_owner_old: VmSpaceOwner<'_>,
        vm_owner_new: VmSpaceOwner<'_>,
        vaddr: Vaddr,
        len: usize,
        r: Result<VmReader<'_>>,
        r_owner: Option<VmIoOwner<'_>>,
    ) -> bool {
        &&& vm_owner_new.inv()
        &&& vm_owner_new.readers == vm_owner_old.readers
        &&& vm_owner_new.writers == vm_owner_old.writers
        &&& r matches Ok(reader) && r_owner matches Some(owner) ==> {
            &&& reader.inv()
            &&& owner.inv_with_reader(reader)
            &&& owner.mem_view is None
        }
    }

    /// The guarantees of the created writer, assuming the preconditions are satisfied.
    ///
    /// Most of the guarantees are the same as those for creating a reader (see
    /// [`Self::reader_ensures`]), except that the writer is associated with a [`VmIoOwner`] that is well-formed with respect to the writer.
    pub open spec fn writer_ensures(
        &self,
        vm_owner_old: VmSpaceOwner<'a>,
        vm_owner_new: VmSpaceOwner<'a>,
        vaddr: Vaddr,
        len: usize,
        r: Result<VmWriter<'a>>,
        r_owner: Option<VmIoOwner<'a>>,
    ) -> bool {
        &&& vm_owner_new.inv()
        &&& vm_owner_new.readers == vm_owner_old.readers
        &&& vm_owner_new.writers == vm_owner_old.writers
        &&& r matches Ok(writer) && r_owner matches Some(owner) ==> {
            &&& writer.inv()
            &&& owner.inv_with_writer(writer)
            &&& owner.mem_view is None
        }
    }

    /// Creates a new VM address space.
    ///
    /// # Verification Design
    ///
    /// This function is marked as `external_body` for now as the current design does not entail
    /// the concrete implementation details of the underlying data structure of the [`VmSpace`].
    ///
    /// ## Preconditions
    /// None
    ///
    /// ## Postconditions
    /// - The returned [`VmSpace`] instance satisfies the invariants of [`VmSpace`]
    /// - The returned [`VmSpace`] instance is equal to the one created by the [`Self::new_spec`]
    ///   function, which is an `uninterp` function that can be used in specifications.
    #[inline]
    #[verifier::external_body]
    #[verifier::when_used_as_spec(new_spec)]
    #[verus_spec(r =>
        ensures
            r == Self::new_spec(),
    )]
    pub fn new() -> Self {
        unimplemented!()
    }

    /// Gets an immutable cursor in the virtual address range.
    ///
    /// The cursor behaves like a lock guard, exclusively owning a sub-tree of
    /// the page table, preventing others from creating a cursor in it. So be
    /// sure to drop the cursor as soon as possible.
    ///
    /// The creation of the cursor may block if another cursor having an
    /// overlapping range is alive.
    #[verifier::external_body]
    pub fn cursor<G: InAtomicMode>(&'a self, guard: &'a G, va: &Range<Vaddr>) -> Result<
        Cursor<'a, G>,
    > {
        Ok(self.pt.cursor(guard, va).map(|pt_cursor| Cursor(pt_cursor.0))?)
    }

    /// Gets a mutable cursor in the virtual address range.
    ///
    /// The same as [`Self::cursor`], the cursor behaves like a lock guard,
    /// exclusively owning a sub-tree of the page table, preventing others
    /// from creating a cursor in it. So be sure to drop the cursor as soon as
    /// possible.
    ///
    /// The creation of the cursor may block if another cursor having an
    /// overlapping range is alive. The modification to the mapping by the
    /// cursor may also block or be overridden by the mapping of another cursor.
    pub fn cursor_mut<G: InAtomicMode>(&'a self, guard: &'a G, va: &Range<Vaddr>) -> Result<
        CursorMut<'a, G>,
    > {
        Ok(
            self.pt.cursor_mut(guard, va).map(
                |pt_cursor|
                    CursorMut {
                        pt_cursor: pt_cursor.0,
                        flusher: TlbFlusher::new(
                            &self.cpus  /*, disable_preempt()*/
                            ,
                        ),
                    },
            )?,
        )
    }

    /// Creates a reader to read data from the user space of the current task.
    ///
    /// Returns [`Err`] if this [`VmSpace`] does not belong to the user space of the current task
    /// or the `vaddr` and `len` do not represent a user space memory range.
    ///
    /// Users must ensure that no other page table is activated in the current task during the
    /// lifetime of the created [`VmReader`]. This guarantees that the [`VmReader`] can operate
    /// correctly.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The [`VmSpace`] invariants must hold with respect to the [`VmSpaceOwner`], which must be active.
    /// - The range `vaddr..vaddr + len` must represent a user space memory range.
    /// - The [`VmSpaceOwner`] must not have any active writers overlapping with the range `vaddr..vaddr + len`.
    /// ## Postconditions
    /// - An inactive [`VmReader`] will be created with the range `vaddr..vaddr + len`.
    /// ## Safety
    /// - The function preserves all memory invariants.
    /// - By requiring that the [`VmSpaceOwner`] must not have any active writers overlapping with the target range,
    /// it prevents data races between the reader and any writers.
    #[inline]
    #[verus_spec(r =>
        with
            Tracked(owner): Tracked<&'a mut VmSpaceOwner<'a>>,
                -> vm_reader_owner: Tracked<Option<VmIoOwner<'a>>>,
        requires
            self.reader_requires(*old(owner), vaddr, len),
        ensures
            self.reader_ensures(*old(owner), *owner, vaddr, len, r, vm_reader_owner@),
    )]
    pub fn reader(&self, vaddr: Vaddr, len: usize) -> Result<VmReader<'a>> {
        let vptr = VirtPtr::from_vaddr(vaddr, len);
        // Ghost identifier for the new reader's `VmIoOwner`.
        let ghost id = owner.new_vm_io_id();
        proof_decl! {
            let tracked mut vm_reader_owner;
        }
        // SAFETY: The memory range is in user space, as established by the
        // `reader_requires` precondition (`vaddr != 0`, `len > 0`,
        // `vaddr + len <= MAX_USERSPACE_VADDR`).
        let reader = unsafe {
            proof_with!(Ghost(id) => Tracked(vm_reader_owner));
            VmReader::from_user_space(vptr)
        };

        proof_with!(|= Tracked(Some(vm_reader_owner)));
        Ok(reader)
    }

    /// Creates a writer to write data to the user space of the current task.
    ///
    /// Returns [`Err`] if this [`VmSpace`] does not belong to the user space of the current task
    /// or the `vaddr` and `len` do not represent a user space memory range.
    ///
    /// Users must ensure that no other page table is activated in the current task during the
    /// lifetime of the created [`VmWriter`]. This guarantees that the [`VmWriter`] can operate correctly.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The [`VmSpace`] invariants must hold with respect to the [`VmSpaceOwner`], which must be active.
    /// - The range `vaddr..vaddr + len` must represent a user space memory range.
    /// - The [`VmSpaceOwner`] must not have any active readers or writers overlapping with the range `vaddr..vaddr + len`.
    /// ## Postconditions
    /// - An inactive [`VmWriter`] will be created with the range `vaddr..vaddr + len`.
    /// ## Safety
    /// - The function preserves all memory invariants.
    /// - By requiring that the [`VmSpaceOwner`] must not have any active readers or writers overlapping with the target range,
    /// it prevents data races.
    #[inline]
    #[verus_spec(r =>
        with
            Tracked(owner): Tracked<&mut VmSpaceOwner<'a>>,
                -> new_owner: Tracked<Option<VmIoOwner<'a>>>,
        requires
            self.writer_requires(*old(owner), vaddr, len),
        ensures
            self.writer_ensures(*old(owner), *owner, vaddr, len, r, new_owner@),
    )]
    pub fn writer(self, vaddr: Vaddr, len: usize) -> Result<VmWriter<'a>> {
        let vptr = VirtPtr::from_vaddr(vaddr, len);
        // Ghost identifier for the new writer's `VmIoOwner`.
        let ghost id = owner.new_vm_io_id();
        proof_decl! {
            let tracked mut vm_writer_owner;
        }

        // `VmWriter` is neither `Sync` nor `Send`, so it will not live longer than the current
        // task. This ensures that the correct page table is activated during the usage period of
        // the `VmWriter`.
        //
        // SAFETY: The memory range is in user space, as established by the
        // `writer_requires` precondition (`vaddr != 0`, `len > 0`,
        // `vaddr + len <= MAX_USERSPACE_VADDR`).
        let writer = unsafe {
            proof_with!(Ghost(id) => Tracked(vm_writer_owner));
            VmWriter::from_user_space(vptr)
        };

        proof_with!(|= Tracked(Some(vm_writer_owner)));
        Ok(writer)
    }
}
412
/// The cursor for querying over the VM space without modifying it.
///
/// It is a thin wrapper over the page table cursor
/// ([`crate::mm::page_table::Cursor`]).
///
/// It exclusively owns a sub-tree of the page table, preventing others from
/// reading or modifying the same sub-tree. Two read-only cursors can not be
/// created from the same virtual address range either.
pub struct Cursor<'a, A: InAtomicMode>(pub crate::mm::page_table::Cursor<'a, UserPtConfig, A>);
419
#[verus_verify]
impl<'rcu, A: InAtomicMode> Cursor<'rcu, A> {
    /// Whether the cursor's current virtual address lies within its locked
    /// (barrier) range, i.e., a query at the current address can succeed.
    pub open spec fn query_success_requires(self) -> bool {
        self.0.barrier_va.start <= self.0.va < self.0.barrier_va.end
    }

    /// Relates the result of a successful query to the abstract cursor `view`:
    /// if the view has a present mapping, `item` is that mapped item and
    /// `range` is what the view's [`CursorView::query_item_spec`] assigns to
    /// it; otherwise `item` is `None` and `range` starts at the cursor's
    /// current address.
    pub open spec fn query_success_ensures(
        self,
        view: CursorView<UserPtConfig>,
        range: Range<Vaddr>,
        item: Option<MappedItem>,
    ) -> bool {
        if view.present() {
            &&& item is Some
            &&& view.query_item_spec(item.unwrap()) == Some(range)
        } else {
            &&& range.start == self.0.va
            &&& item is None
        }
    }

    /// Queries the mapping at the current virtual address.
    ///
    /// If the cursor is pointing to a valid virtual address that is locked,
    /// it will return the virtual address range and the mapped item.
    /// ## Preconditions
    /// - All system invariants must hold
    /// - **Liveness**: The function will return an error if the cursor is not within the locked range
    /// ## Postconditions
    /// - If there is a mapped item at the current virtual address ([`query_some_condition`]),
    /// it is returned along with the virtual address range that it maps ([`query_success_ensures`]).
    /// - The mapping that is returned corresponds to the abstract mapping given by [`query_item_spec`](CursorView::query_item_spec).
    /// - If there is no mapped item at the current virtual address ([`query_none_condition`]),
    /// it returns [`None`], and the virtual address range of the cursor's current position.
    /// ## Safety
    /// - This function preserves all memory invariants.
    /// - The locking mechanism prevents data races.
    #[verus_spec(r =>
        with Tracked(owner): Tracked<&mut CursorOwner<'rcu, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'rcu, UserPtConfig>>
        requires
            old(self).0.invariants(*old(owner), *old(regions), *old(guards)),
            old(owner).in_locked_range(),
        ensures
            self.0.invariants(*owner, *regions, *guards),
            self.0.query_some_condition(*owner) ==> {
                &&& r is Ok
                &&& self.0.query_some_ensures(*owner, r.unwrap())
            },
            !self.0.query_some_condition(*owner) ==> {
                &&& r is Ok
                &&& self.0.query_none_ensures(*owner, r.unwrap())
            },
    )]
    pub fn query(&mut self) -> Result<(Range<Vaddr>, Option<MappedItem>)> {
        Ok(
            #[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
            self.0.query()?,
        )
    }

    /// Moves the cursor forward to the next mapped virtual address.
    ///
    /// If there is a mapped virtual address following the current address within
    /// next `len` bytes, it will return that mapped address. In this case,
    /// the cursor will stop at the mapped address.
    ///
    /// Otherwise, it will return `None`. And the cursor may stop at any
    /// address after `len` bytes.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - **Liveness**: The cursor must be within the locked range and below the guard level.
    /// - **Liveness**: The length must be page-aligned and less than or equal to the remaining range of the cursor.
    /// ## Postconditions
    /// - If there is a mapped address after the current address within the next `len` bytes,
    /// it will move the cursor to the next mapped address and return it.
    /// - If not, it will return `None`. The cursor may stop at any
    /// address after `len` bytes, but it will not move past the barrier address.
    /// ## Panics
    /// This method panics if the length is longer than the remaining range of the cursor.
    /// ## Safety
    /// This function preserves all memory invariants.
    /// Because it panics rather than move the cursor to an invalid address,
    /// it ensures that the cursor is safe to use after the call.
    /// The locking mechanism prevents data races.
    #[verus_spec(res =>
        with Tracked(owner): Tracked<&mut CursorOwner<'rcu, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'rcu, UserPtConfig>>
        requires
            old(self).0.invariants(*old(owner), *old(regions), *old(guards)),
            old(self).0.level < old(self).0.guard_level,
            old(owner).in_locked_range(),
            len % PAGE_SIZE == 0,
            old(self).0.va + len <= old(self).0.barrier_va.end,
        ensures
            self.0.invariants(*owner, *regions, *guards),
            res is Some ==> {
                &&& res.unwrap() == self.0.va
                &&& owner.level < owner.guard_level
                &&& owner.in_locked_range()
            },
    )]
    pub fn find_next(&mut self, len: usize) -> (res: Option<Vaddr>) {
        #[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
        self.0.find_next(len)
    }

    /// Jumps to the virtual address.
    ///
    /// This function will move the cursor to the given virtual address.
    /// If the target address is not in the locked range, it will return an error.
    /// # Verified Properties
    /// ## Preconditions
    /// The cursor must be within the locked range and below the guard level.
    /// ## Postconditions
    /// - If the target address is in the locked range, it will move the cursor to the given address.
    /// - If the target address is not in the locked range, it will return an error.
    /// ## Panics
    /// This method panics if the target address is not aligned to the page size.
    /// ## Safety
    /// This function preserves all memory invariants.
    /// Because it throws an error rather than move the cursor to an invalid address,
    /// it ensures that the cursor is safe to use after the call.
    /// The locking mechanism prevents data races.
    #[verus_spec(res =>
        with Tracked(owner): Tracked<&mut CursorOwner<'rcu, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'rcu, UserPtConfig>>
        requires
            old(self).0.invariants(*old(owner), *old(regions), *old(guards)),
            old(self).0.level < old(self).0.guard_level,
            old(owner).in_locked_range(),
            old(self).0.jump_panic_condition(va),
        ensures
            self.0.invariants(*owner, *regions, *guards),
            self.0.barrier_va.start <= va < self.0.barrier_va.end ==> {
                &&& res is Ok
                &&& self.0.va == va
            },
            !(self.0.barrier_va.start <= va < self.0.barrier_va.end) ==> res is Err,
    )]
    pub fn jump(&mut self, va: Vaddr) -> Result<()> {
        (#[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
        self.0.jump(va))?;
        Ok(())
    }

    /// Gets the virtual address of the current slot.
    pub fn virt_addr(&self) -> Vaddr
        returns
            self.0.va,
    {
        self.0.virt_addr()
    }
}
578
/// The cursor for modifying the mappings in VM space.
///
/// It exclusively owns a sub-tree of the page table, preventing others from
/// reading or modifying the same sub-tree.
pub struct CursorMut<'a, A: InAtomicMode> {
    /// The underlying page-table cursor that performs the actual traversal
    /// and modification.
    pub pt_cursor: crate::mm::page_table::CursorMut<'a, UserPtConfig, A>,
    // We have a read lock so the CPU set in the flusher is always a superset
    // of actual activated CPUs.
    pub flusher: TlbFlusher<'a  /*, DisabledPreemptGuard*/ >,
}
589
590impl<'a, A: InAtomicMode> CursorMut<'a, A> {
    /// Preconditions for querying through this mutable cursor: the inner
    /// page-table cursor is well-formed with respect to `owner`, and both the
    /// `owner` and `regions` invariants hold.
    ///
    /// NOTE(review): `guard_perm` is not constrained by this predicate —
    /// confirm whether that is intentional or a leftover parameter.
    pub open spec fn query_requires(
        cursor: Self,
        owner: CursorOwner<'a, UserPtConfig>,
        guard_perm: vstd::simple_pptr::PointsTo<PageTableGuard<'a, UserPtConfig>>,
        regions: MetaRegionOwners,
    ) -> bool {
        &&& cursor.pt_cursor.inner.wf(owner)
        &&& owner.inv()
        &&& regions.inv()
    }
601
    /// Postconditions for querying through this mutable cursor: the new
    /// `regions` invariant holds and the new cursor remains well-formed with
    /// respect to `owner`.
    ///
    /// NOTE(review): `old_cursor`, `guard_perm`, `old_regions`, and the result
    /// `r` are not constrained here — confirm whether stronger guarantees are
    /// intended.
    pub open spec fn query_ensures(
        old_cursor: Self,
        new_cursor: Self,
        owner: CursorOwner<'a, UserPtConfig>,
        guard_perm: vstd::simple_pptr::PointsTo<PageTableGuard<'a, UserPtConfig>>,
        old_regions: MetaRegionOwners,
        new_regions: MetaRegionOwners,
        r: Result<(Range<Vaddr>, Option<MappedItem>)>,
    ) -> bool {
        &&& new_regions.inv()
        &&& new_cursor.pt_cursor.inner.wf(owner)
    }
614
    /// Queries the mapping at the current virtual address.
    ///
    /// This behaves the same as [`Cursor::query`].
    ///
    /// If the cursor is pointing to a valid virtual address that is locked,
    /// it will return the virtual address range and the mapped item.
    /// ## Preconditions
    /// - All system invariants must hold
    /// - **Liveness**: The function will return an error if the cursor is not within the locked range
    /// ## Postconditions
    /// - If there is a mapped item at the current virtual address ([`query_some_condition`]),
    /// it is returned along with the virtual address range that it maps ([`query_success_ensures`]).
    /// - The mapping that is returned corresponds to the abstract mapping given by [`query_item_spec`](CursorView::query_item_spec).
    /// - If there is no mapped item at the current virtual address ([`query_none_condition`]),
    /// it returns `None`, and the virtual address range of the cursor's current position.
    /// ## Safety
    /// - This function preserves all memory invariants.
    /// - The locking mechanism prevents data races.
    #[verus_spec(res =>
        with Tracked(owner): Tracked<&mut CursorOwner<'a, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'a, UserPtConfig>>
        requires
            old(self).pt_cursor.inner.invariants(*old(owner), *old(regions), *old(guards)),
            old(owner).in_locked_range(),
        ensures
            self.pt_cursor.inner.invariants(*owner, *regions, *guards),
            old(self).pt_cursor.inner.query_some_condition(*owner) ==> {
                &&& res is Ok
                &&& self.pt_cursor.inner.query_some_ensures(*owner, res.unwrap())
            },
            !old(self).pt_cursor.inner.query_some_condition(*owner) ==> {
                &&& res is Ok
                &&& self.pt_cursor.inner.query_none_ensures(*owner, res.unwrap())
            },
    )]
    pub fn query(&mut self) -> Result<(Range<Vaddr>, Option<MappedItem>)> {
        Ok(
            #[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
            self.pt_cursor.query()?,
        )
    }
657
    /// Moves the cursor forward to the next mapped virtual address.
    ///
    /// This is the same as [`Cursor::find_next`].
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - **Liveness**: The cursor must be within the locked range and below the guard level.
    /// - **Liveness**: The length must be page-aligned and less than or equal to the remaining range of the cursor.
    /// ## Postconditions
    /// - If there is a mapped address after the current address within the next `len` bytes,
    /// it will move the cursor to the next mapped address and return it.
    /// - If not, it will return `None`. The cursor may stop at any
    /// address after `len` bytes, but it will not move past the barrier address.
    /// ## Panics
    /// This method panics if the length is longer than the remaining range of the cursor.
    /// ## Safety
    /// This function preserves all memory invariants.
    /// Because it panics rather than move the cursor to an invalid address,
    /// it ensures that the cursor is safe to use after the call.
    /// The locking mechanism prevents data races.
    #[verus_spec(res =>
        with Tracked(owner): Tracked<&mut CursorOwner<'a, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'a, UserPtConfig>>
    )]
    pub fn find_next(&mut self, len: usize) -> (res: Option<Vaddr>)
        requires
            old(self).pt_cursor.inner.invariants(*old(owner), *old(regions), *old(guards)),
            old(self).pt_cursor.inner.level < old(self).pt_cursor.inner.guard_level,
            old(owner).in_locked_range(),
            len % PAGE_SIZE == 0,
            old(self).pt_cursor.inner.va + len <= old(self).pt_cursor.inner.barrier_va.end,
        ensures
            self.pt_cursor.inner.invariants(*owner, *regions, *guards),
            res is Some ==> {
                &&& res.unwrap() == self.pt_cursor.inner.va
                &&& owner.level < owner.guard_level
                &&& owner.in_locked_range()
            },
    {
        #[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
        self.pt_cursor.find_next(len)
    }
701
    /// Jumps to the given virtual address.
    ///
    /// This is the same as [`Cursor::jump`].
    ///
    /// This function will move the cursor to the given virtual address.
    /// If the target address is not in the locked range, it will return an error.
    /// # Verified Properties
    /// ## Preconditions
    /// The cursor must be within the locked range and below the guard level.
    /// ## Postconditions
    /// - If the target address is in the locked range, it will move the cursor to the given address.
    /// - If the target address is not in the locked range, it will return an error.
    /// ## Panics
    /// This method panics if the target address is not aligned to the page size
    /// (captured by the `jump_panic_condition` precondition in the spec below).
    /// ## Safety
    /// This function preserves all memory invariants.
    /// Because it throws an error rather than move the cursor to an invalid address,
    /// it ensures that the cursor is safe to use after the call.
    /// The locking mechanism prevents data races.
    #[verus_spec(res =>
        with
            Tracked(owner): Tracked<&mut CursorOwner<'a, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'a, UserPtConfig>>
        requires
            old(self).pt_cursor.inner.invariants(*old(owner), *old(regions), *old(guards)),
            old(self).pt_cursor.inner.level < old(self).pt_cursor.inner.guard_level,
            old(owner).in_locked_range(),
            old(self).pt_cursor.inner.jump_panic_condition(va),
        ensures
            self.pt_cursor.inner.invariants(*owner, *regions, *guards),
            self.pt_cursor.inner.barrier_va.start <= va < self.pt_cursor.inner.barrier_va.end ==> {
                &&& res is Ok
                &&& self.pt_cursor.inner.va == va
            },
            !(self.pt_cursor.inner.barrier_va.start <= va < self.pt_cursor.inner.barrier_va.end) ==> res is Err,
    )]
    pub fn jump(&mut self, va: Vaddr) -> Result<()> {
        // Delegate to the page table cursor; `?` propagates the out-of-range
        // error without moving the cursor to an invalid address.
        (#[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
        self.pt_cursor.jump(va))?;
        Ok(())
    }
744
    /// Get the virtual address of the current slot.
    ///
    /// The `returns` clause ties the result to the cursor model's current
    /// virtual address (`pt_cursor.inner.va`), so callers can reason about
    /// the returned value in specs.
    #[verus_spec(r =>
        returns
            self.pt_cursor.inner.va,
    )]
    pub fn virt_addr(&self) -> Vaddr {
        self.pt_cursor.virt_addr()
    }
753
    /// Get the dedicated TLB flusher for this cursor.
    ///
    /// Callers use the returned flusher to issue and dispatch TLB flushes
    /// after operations that do not flush the TLB themselves (e.g.
    /// [`Self::protect_next`]).
    pub fn flusher(&self) -> &TlbFlusher<'a> {
        &self.flusher
    }
758
    /// Collects the invariants of the cursor, its owner, and associated tracked structures.
    /// The cursor must be well-formed with respect to its owner. This will hold before and after the call to `map`.
    pub open spec fn map_cursor_inv(
        self,
        cursor_owner: CursorOwner<'a, UserPtConfig>,
        guards: Guards<'a, UserPtConfig>,
        regions: MetaRegionOwners,
    ) -> bool {
        // Ghost owner is internally consistent and agrees with the exec cursor.
        &&& cursor_owner.inv()
        &&& self.pt_cursor.inner.wf(cursor_owner)
        &&& self.pt_cursor.inner.inv()
        // Lock-state relationship between the owner and the held guards.
        &&& cursor_owner.children_not_locked(guards)
        &&& cursor_owner.nodes_locked(guards)
        // Owner agrees with the global metadata region owners.
        &&& cursor_owner.relate_region(regions)
        &&& !cursor_owner.popped_too_high
        &&& regions.inv()
    }
776
    /// These conditions must hold before the call to `map` but may not be maintained after the call.
    /// The cursor must be within the locked range and below the guard level, but it may move outside the
    /// range if the frame being mapped is exactly the length of the remaining range.
    pub open spec fn map_cursor_requires(
        self,
        cursor_owner: CursorOwner<'a, UserPtConfig>,
    ) -> bool {
        &&& cursor_owner.in_locked_range()
        &&& self.pt_cursor.inner.level < self.pt_cursor.inner.guard_level
        // The cursor has not yet reached the end of its barrier range.
        &&& self.pt_cursor.inner.va < self.pt_cursor.inner.barrier_va.end
    }
788
    /// Collects the conditions that must hold on the frame being mapped.
    /// The frame must be well-formed with respect to the entry owner. When converted into a `MappedItem`,
    /// its physical address must also match, and its level must be between 1 and the highest translation level.
    pub open spec fn map_item_requires(
        self,
        frame: UFrame,
        prop: PageProperty,
        entry_owner: EntryOwner<UserPtConfig>,
    ) -> bool {
        let item = MappedItem { frame: frame, prop: prop };
        let (paddr, level, prop0) = UserPtConfig::item_into_raw_spec(item);
        // The raw form of the item agrees with the entry owner's frame view.
        &&& prop == prop0
        &&& entry_owner.frame.unwrap().mapped_pa == paddr
        &&& entry_owner.frame.unwrap().prop == prop
        &&& level <= UserPtConfig::HIGHEST_TRANSLATION_LEVEL()
        &&& 1 <= level <= NR_LEVELS  // Should be property of item_into_raw
        &&& Child::Frame(paddr, level, prop0).wf(entry_owner)
        // The mapping fits within the cursor's barrier range and the cursor
        // is aligned to the mapping's level.
        &&& self.pt_cursor.inner.va + page_size(level) <= self.pt_cursor.inner.barrier_va.end
        &&& entry_owner.inv()
        &&& self.pt_cursor.inner.va % page_size(level) == 0
    }
810
    /// The result of a call to `map`. Constructs a `Mapping` from the frame being mapped and the cursor's current virtual address.
    /// The page table's cursor view will be updated with this mapping, replacing the previous mapping (if any).
    pub open spec fn map_item_ensures(
        self,
        frame: UFrame,
        prop: PageProperty,
        old_cursor_view: CursorView<UserPtConfig>,
        cursor_view: CursorView<UserPtConfig>,
    ) -> bool {
        let item = MappedItem { frame: frame, prop: prop };
        let (paddr, level, prop0) = UserPtConfig::item_into_raw_spec(item);
        // New view == old view with `paddr` mapped at the cursor's position
        // for `page_size(level)` bytes with property `prop`.
        cursor_view == old_cursor_view.map_spec(paddr, page_size(level), prop)
    }
824
    /// Map a frame into the current slot.
    ///
    /// This method will bring the cursor to the next slot after the modification.
    /// # Verified Properties
    /// ## Preconditions
    /// - The cursor must be within the locked range and below the guard level,
    /// and the frame must fit within the remaining range of the cursor.
    /// - The cursor must satisfy all invariants, and the frame must be well-formed when converted into a `MappedItem` ([`map_item_requires`](Self::map_item_requires)).
    /// ## Postconditions
    /// After the call, the cursor will satisfy all invariants, and will map the frame into the current slot according to [`map_spec`](CursorView::map_spec).
    /// After the call, the TLB will not contain any entries for the virtual address range being mapped (TODO).
    /// ## Safety
    /// The preconditions of this function require that the frame to be mapped is disjoint from any other mapped frames.
    /// If this is not the case, the global memory invariants will be violated. If the allocator implementation is correct,
    /// the user shouldn't be able to create such a frame object in the first place, but currently a proof of that is
    /// outside of the verification boundary.
    /// Because this function flushes the TLB if it unmaps a page, it preserves TLB consistency.
    #[verus_spec(
        with Tracked(cursor_owner): Tracked<&mut CursorOwner<'a, UserPtConfig>>,
            Tracked(entry_owner): Tracked<EntryOwner<UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'a, UserPtConfig>>,
            Tracked(tlb_model): Tracked<&mut TlbModel>
        requires
            old(tlb_model).inv(),
            old(self).map_cursor_requires(*old(cursor_owner)),
            old(self).map_cursor_inv(*old(cursor_owner), *old(guards), *old(regions)),
            old(self).map_item_requires(frame, prop, entry_owner),
        ensures
            self.map_cursor_inv(*cursor_owner, *guards, *regions),
            old(self).map_item_ensures(
                frame,
                prop,
                old(self).pt_cursor.inner.model(*old(cursor_owner)),
                self.pt_cursor.inner.model(*cursor_owner),
            ),
    )]
    pub fn map(&mut self, frame: UFrame, prop: PageProperty) {
        let start_va = self.virt_addr();
        let item = MappedItem { frame: frame, prop: prop };

        // Proof debt: disjointness of `item` from all existing mappings is
        // admitted rather than proven (see the Safety section above).
        assert(crate::mm::page_table::CursorMut::<'a, UserPtConfig, A>::item_not_mapped(
            item,
            *old(regions),
        )) by { admit() };

        // SAFETY: It is safe to map untyped memory into the userspace.
        let Err(frag) = (
        #[verus_spec(with Tracked(cursor_owner), Tracked(entry_owner), Tracked(regions), Tracked(guards))]
        self.pt_cursor.map(item)) else {
            return ;  // No mapping exists at the current address.
        };

        match frag {
            PageTableFrag::Mapped { va, item } => {
                //debug_assert_eq!(va, start_va);
                let old_frame = item.frame;

                // A previous mapping was replaced: flush its TLB entry,
                // passing the old frame to the flusher (presumably so it
                // stays alive until the flush completes — see
                // `issue_tlb_flush_with`).
                #[verus_spec(with Tracked(tlb_model))]
                self.flusher.issue_tlb_flush_with(TlbFlushOp::Address(start_va), old_frame.into());
                #[verus_spec(with Tracked(tlb_model))]
                self.flusher.dispatch_tlb_flush();
            },
            PageTableFrag::StrayPageTable { .. } => {
                // Unreachable for a base-page-sized `UFrame`; currently
                // discharged with `admit()` (proof debt — see the commented
                // panic below for the intended runtime behavior).
                assert(false) by { admit() };
                //panic!("`UFrame` is base page sized but re-mapping out a child PT");
            },
        }
    }
894
    /// Clears the mapping starting from the current slot,
    /// and returns the number of unmapped pages.
    ///
    /// This method will bring the cursor forward by `len` bytes in the virtual
    /// address space after the modification.
    ///
    /// Already-absent mappings encountered by the cursor will be skipped. It
    /// is valid to unmap a range that is not mapped.
    ///
    /// It must issue and dispatch a TLB flush after the operation. Otherwise,
    /// the memory safety will be compromised. Please call this function less
    /// to avoid the overhead of TLB flush. Using a large `len` is wiser than
    /// splitting the operation into multiple small ones.
    ///
    /// # Panics
    /// Panics if:
    ///  - the length is longer than the remaining range of the cursor;
    ///  - the length is not page-aligned.
    ///
    /// NOTE(review): the loop's progress bound, the finiteness of the mapping
    /// set, and the counter/range overflow bounds are currently discharged
    /// with `assume`/`admit` in the body — proof debt.
    #[verus_spec(r =>
        with Tracked(cursor_owner): Tracked<&mut CursorOwner<'a, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'a, UserPtConfig>>,
            Tracked(tlb_model): Tracked<&mut TlbModel>
        requires
            len > 0,
            len % PAGE_SIZE == 0,
            old(self).pt_cursor.inner.va % PAGE_SIZE == 0,
            old(self).pt_cursor.inner.va + len <= KERNEL_VADDR_RANGE.end as int,
            old(self).pt_cursor.inner.invariants(*old(cursor_owner), *old(regions), *old(guards)),
            old(cursor_owner).in_locked_range(),
            old(self).pt_cursor.inner.va + len <= old(self).pt_cursor.inner.barrier_va.end,
            old(tlb_model).inv(),
        ensures
            self.pt_cursor.inner.invariants(*cursor_owner, *regions, *guards),
            old(self).pt_cursor.inner.model(*old(cursor_owner)).unmap_spec(len, self.pt_cursor.inner.model(*cursor_owner), r),
            tlb_model.inv(),
    )]
    pub fn unmap(&mut self, len: usize) -> usize {
        proof {
            // Re-establish that the ghost owner's view of the current virtual
            // address mirrors the exec cursor's `va`.
            assert(cursor_owner.va.reflect(self.pt_cursor.inner.va));
            assert(cursor_owner.inv());
            cursor_owner.va.reflect_prop(self.pt_cursor.inner.va);
        }

        let end_va = self.virt_addr() + len;
        let mut num_unmapped: usize = 0;

        // Ghost snapshots of the initial state. The loop invariant says the
        // mappings removed so far are exactly those whose start address lies
        // in [start_va, current cursor va), and `num_unmapped` counts them.
        let ghost start_mappings: Set<Mapping> = cursor_owner@.mappings;
        let ghost start_va: Vaddr = cursor_owner@.cur_va;

        proof {
            assert((self.pt_cursor.inner.va + len) % PAGE_SIZE as int == 0) by (compute);

            // Establish the loop invariant at entry: the removed-set filter
            // over the empty range [start_va, start_va) is empty.
            assert(end_va as int == start_va + len);
            assert(start_mappings.filter(|m: Mapping| start_va <= m.va_range.start < start_va)
                =~= Set::<Mapping>::empty());
            assert(start_mappings.difference(Set::<Mapping>::empty()) =~= start_mappings);
            assume(start_mappings.finite());  // proof debt: finiteness of the mapping set
        }

        #[verus_spec(
            invariant
                self.pt_cursor.inner.va <= end_va,
                self.pt_cursor.inner.va % PAGE_SIZE == 0,
                end_va % PAGE_SIZE == 0,
                self.pt_cursor.inner.invariants(*cursor_owner, *regions, *guards),
                cursor_owner.in_locked_range(),
                end_va <= self.pt_cursor.inner.barrier_va.end,
                tlb_model.inv(),
                start_va <= cursor_owner@.cur_va,
                cursor_owner@.mappings =~= start_mappings.difference(
                    start_mappings.filter(|m: Mapping| start_va <= m.va_range.start < cursor_owner@.cur_va)),
                num_unmapped as nat == start_mappings.filter(
                    |m: Mapping| start_va <= m.va_range.start < cursor_owner@.cur_va).len(),
                start_mappings =~= old(cursor_owner)@.mappings,
                start_va == old(cursor_owner)@.cur_va,
                start_mappings.finite(),
            invariant_except_break
                self.pt_cursor.inner.va < end_va,
            ensures
                self.pt_cursor.inner.va >= end_va,
            decreases end_va - self.pt_cursor.inner.va
        )]
        loop {
            // Ghost snapshot of the state at the top of this iteration.
            let ghost prev_va: Vaddr = cursor_owner@.cur_va;
            let ghost prev_mappings: Set<Mapping> = cursor_owner@.mappings;
            let ghost num_unmapped_before: nat = num_unmapped as nat;

            proof {
                assert(self.pt_cursor.inner.wf(*cursor_owner));
                assert(cursor_owner.inv());
                cursor_owner.va.reflect_prop(self.pt_cursor.inner.va);
                assert(prev_va == self.pt_cursor.inner.va);
            }

            // SAFETY: It is safe to un-map memory in the userspace.
            let Some(frag) = (unsafe {
                #[verus_spec(with Tracked(cursor_owner), Tracked(regions), Tracked(guards))]
                self.pt_cursor.take_next(end_va - self.virt_addr())
            }) else {
                // `take_next` found nothing left to unmap: the cursor has
                // advanced to `end_va`. Show that no mapping starts in
                // [prev_va, end_va), so the removed-set invariant carries
                // over to the post-loop state unchanged.
                proof {
                    assert(self.pt_cursor.inner.wf(*cursor_owner));
                    assert(cursor_owner.inv());
                    cursor_owner.va.reflect_prop(self.pt_cursor.inner.va);

                    assert(cursor_owner@.cur_va == end_va);
                    assert(cursor_owner@.cur_va >= end_va);
                    assert(self.pt_cursor.inner.va == end_va);

                    assert(start_mappings.filter(|m: Mapping| prev_va <= m.va_range.start < end_va)
                        =~= Set::<Mapping>::empty()) by {
                        assert forall|m: Mapping|
                            #![auto]
                            start_mappings.contains(m) && prev_va <= m.va_range.start
                                && m.va_range.start < end_va implies false by {
                            assert(!(start_va <= m.va_range.start && m.va_range.start < prev_va));
                            assert(prev_mappings.contains(m));
                            assert(prev_mappings.filter(
                                |m: Mapping| prev_va <= m.va_range.start < end_va,
                            ).contains(m));
                        };
                    };

                    assert forall|m: Mapping| #[trigger]
                        start_mappings.contains(m) && prev_va
                            <= m.va_range.start implies m.va_range.start >= end_va by {
                        if start_mappings.contains(m) && prev_va <= m.va_range.start
                            && m.va_range.start < end_va {
                            assert(!(start_va <= m.va_range.start && m.va_range.start < prev_va));
                            assert(prev_mappings.contains(m));
                            assert(prev_mappings.filter(
                                |m: Mapping| prev_va <= m.va_range.start < end_va,
                            ).contains(m));
                        }
                    };

                    // filter([start_va, end_va)) == filter([start_va, prev_va))
                    assert(start_mappings.filter(|m: Mapping| start_va <= m.va_range.start < end_va)
                        =~= start_mappings.filter(
                        |m: Mapping| start_va <= m.va_range.start < prev_va,
                    ));

                    // filter([start_va, cursor_va)) == filter([start_va, end_va))
                    assert(start_mappings.filter(
                        |m: Mapping| start_va <= m.va_range.start < cursor_owner@.cur_va,
                    ) =~= start_mappings.filter(
                        |m: Mapping| start_va <= m.va_range.start < prev_va,
                    ));
                    // Since cursor_owner@.cur_va == end_va, the filter predicates are identical
                    assert(start_mappings.filter(
                        |m: Mapping| start_va <= m.va_range.start < cursor_owner@.cur_va,
                    ) =~= start_mappings.filter(
                        |m: Mapping| start_va <= m.va_range.start < end_va,
                    ));
                    assert(start_mappings.filter(
                        |m: Mapping| start_va <= m.va_range.start < cursor_owner@.cur_va,
                    ) =~= start_mappings.filter(
                        |m: Mapping| start_va <= m.va_range.start < prev_va,
                    ));
                }
                break ;
            };

            // Number of mappings removed by this `take_next` step:
            // those starting in [prev_va, new cursor va).
            let ghost step_removed_len: nat = prev_mappings.filter(
                |m: Mapping| prev_va <= m.va_range.start < cursor_owner@.cur_va,
            ).len();

            proof {
                // Re-establish reflect for post-call state
                assert(self.pt_cursor.inner.wf(*cursor_owner));
                assert(cursor_owner.inv());
                cursor_owner.va.reflect_prop(self.pt_cursor.inner.va);

                let new_va = cursor_owner@.cur_va;

                assert(cursor_owner@.mappings =~= start_mappings.difference(
                    start_mappings.filter(|m: Mapping| start_va <= m.va_range.start < new_va),
                )) by {
                    assert forall|m: Mapping|
                        #![auto]
                        cursor_owner@.mappings.contains(m) <==> (start_mappings.contains(m) && !(
                        start_va <= m.va_range.start && m.va_range.start < new_va)) by {
                        // LHS: m in prev_mappings AND NOT in step_removed
                        // RHS: m in start_mappings AND NOT in [start_va, new_va)
                        if start_mappings.contains(m) {
                            if start_va <= m.va_range.start && m.va_range.start < prev_va {
                                // m in removed_before => not in prev_mappings => not in LHS
                                assert(!prev_mappings.contains(m));
                                assert(!cursor_owner@.mappings.contains(m));
                            } else if prev_va <= m.va_range.start && m.va_range.start < new_va {
                                // m not in removed_before => in prev_mappings
                                // m in step_removed => not in LHS
                                assert(prev_mappings.contains(m));
                                assert(!cursor_owner@.mappings.contains(m));
                            } else {
                                // m not in removed_before => in prev_mappings
                                // m not in step_removed => in LHS
                                assert(prev_mappings.contains(m));
                                assert(cursor_owner@.mappings.contains(m));
                            }
                        }
                    };
                };

                // Split the removed set into the part removed before this
                // iteration (f_prev) and the part removed by this step
                // (f_step); their disjoint union is the total (f_all), so
                // cardinalities add up.
                let f_prev = start_mappings.filter(
                    |m: Mapping| start_va <= m.va_range.start < prev_va,
                );
                let f_step = start_mappings.filter(
                    |m: Mapping| prev_va <= m.va_range.start < new_va,
                );
                let f_all = start_mappings.filter(
                    |m: Mapping| start_va <= m.va_range.start < new_va,
                );

                assert(f_step =~= prev_mappings.filter(
                    |m: Mapping| prev_va <= m.va_range.start < new_va,
                )) by {
                    assert forall|m: Mapping|
                        #![auto]
                        f_step.contains(m) <==> prev_mappings.filter(
                            |m: Mapping| prev_va <= m.va_range.start < new_va,
                        ).contains(m) by {
                        if start_mappings.contains(m) && prev_va <= m.va_range.start
                            && m.va_range.start < new_va {
                            assert(!(start_va <= m.va_range.start && m.va_range.start < prev_va));
                            assert(prev_mappings.contains(m));
                        }
                    };
                };

                assert(f_all =~= f_prev + f_step);
                // Disjoint finite sets: |A + B| = |A| + |B|
                vstd::set_lib::lemma_set_disjoint_lens(f_prev, f_step);
            }

            // Account for the pages unmapped by this step and request the
            // corresponding TLB flushes.
            let ghost mut step_delta: nat = 0;
            match frag {
                PageTableFrag::Mapped { va, item, .. } => {
                    let frame = item.frame;
                    assume(num_unmapped < usize::MAX);  // proof debt: counter overflow bound
                    num_unmapped += 1;
                    proof {
                        step_delta = 1;
                    }
                    #[verus_spec(with Tracked(tlb_model))]
                    self.flusher.issue_tlb_flush_with(TlbFlushOp::Address(va), frame.into());
                },
                PageTableFrag::StrayPageTable { pt, va, len, num_frames } => {
                    assume(num_unmapped + num_frames < usize::MAX);  // proof debt: counter overflow bound
                    num_unmapped += num_frames;
                    proof {
                        step_delta = num_frames as nat;
                    }
                    assume(va + len <= usize::MAX);  // proof debt: range-end overflow bound
                    #[verus_spec(with Tracked(tlb_model))]
                    self.flusher.issue_tlb_flush_with(TlbFlushOp::Range(va..va + len), pt);
                },
            }

            proof {
                assert(num_unmapped as nat == start_mappings.filter(
                    |m: Mapping| start_va <= m.va_range.start < cursor_owner@.cur_va,
                ).len());
                // Progress (cursor strictly before `end_va` when the loop
                // continues) is admitted — proof debt.
                assert(self.pt_cursor.inner.va < end_va) by { admit() };
            }
        }

        proof {
            cursor_owner.va.reflect_prop(self.pt_cursor.inner.va);
        }

        // Dispatch all issued TLB flushes before returning; memory safety
        // depends on this (see the doc comment above).
        #[verus_spec(with Tracked(tlb_model))]
        self.flusher.dispatch_tlb_flush();

        num_unmapped
    }
1171
    /// Applies the operation to the next slot of mapping within the range.
    ///
    /// The range to be found in is the current virtual address with the
    /// provided length.
    ///
    /// The function stops and yields the actually protected range if it has
    /// actually protected a page, no matter if the following pages are also
    /// required to be protected.
    ///
    /// It also makes the cursor move forward to the next page after the
    /// protected one. If no mapped pages exist in the following range, the
    /// cursor will stop at the end of the range and return [`None`].
    ///
    /// Note that it will **NOT** flush the TLB after the operation. Please
    /// make the decision yourself on when and how to flush the TLB using
    /// [`Self::flusher`].
    ///
    /// # Panics
    ///
    /// Panics if the length is longer than the remaining range of the cursor.
    ///
    /// NOTE(review): the `ensures` clause of the spec below is currently
    /// empty, so only the preconditions are machine-checked for this method.
    #[verus_spec(r =>
        with Tracked(owner): Tracked<&mut CursorOwner<'a, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'a, UserPtConfig>>,
        requires
            old(regions).inv(),
            old(owner).inv(),
            !old(owner).cur_entry_owner().is_absent(),
            old(self).pt_cursor.inner.wf(*old(owner)),
            old(self).pt_cursor.inner.inv(),
            old(owner).in_locked_range(),
            !old(owner).popped_too_high,
            old(owner).children_not_locked(*old(guards)),
            old(owner).nodes_locked(*old(guards)),
            old(owner).relate_region(*old(regions)),
            len % PAGE_SIZE == 0,
            old(self).pt_cursor.inner.level < NR_LEVELS,
            old(self).pt_cursor.inner.va + len <= old(self).pt_cursor.inner.barrier_va.end,
            old(owner).cur_entry_owner().is_frame(),
            op.requires((old(owner).cur_entry_owner().frame.unwrap().prop,)),
        ensures
    )]
    pub fn protect_next(
        &mut self,
        len: usize,
        op: impl FnOnce(PageProperty) -> PageProperty,
    ) -> Option<Range<Vaddr>> {
        // SAFETY: It is safe to protect memory in the userspace.
        unsafe {
            #[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
            self.pt_cursor.protect_next(len, op)
        }
    }
1225}
1226
1227/*cpu_local_cell! {
1228    /// The `Arc` pointer to the activated VM space on this CPU. If the pointer
1229    /// is NULL, it means that the activated page table is merely the kernel
1230    /// page table.
1231    // TODO: If we are enabling ASID, we need to maintain the TLB state of each
1232    // CPU, rather than merely the activated `VmSpace`. When ASID is enabled,
1233    // the non-active `VmSpace`s can still have their TLB entries in the CPU!
1234    static ACTIVATED_VM_SPACE: *const VmSpace = core::ptr::null();
1235}*/
1236
1237/*#[cfg(ktest)]
1238pub(super) fn get_activated_vm_space() -> *const VmSpace {
1239    ACTIVATED_VM_SPACE.load()
1240}*/
1241
/// The configuration for user page tables.
///
/// Implements [`PageTableConfig`] with top-level indices restricted to
/// `0..256` and [`MappedItem`] as the mapped item type (see the
/// `PageTableConfig` impl below).
#[derive(Clone, Debug)]
pub struct UserPtConfig {}
1245
/// The item that can be mapped into the [`VmSpace`].
#[derive(Clone)]
pub struct MappedItem {
    /// The untyped frame backing the mapping.
    pub frame: UFrame,
    /// The page property of the mapping.
    pub prop: PageProperty,
}
1252
// SAFETY: `item_into_raw` and `item_from_raw` are implemented correctly;
// they round-trip a `MappedItem` through its raw `(Paddr, PagingLevel,
// PageProperty)` representation, as captured by the `item_roundtrip`
// axiom below.
unsafe impl PageTableConfig for UserPtConfig {
    open spec fn TOP_LEVEL_INDEX_RANGE_spec() -> Range<usize> {
        0..256
    }

    open spec fn TOP_LEVEL_CAN_UNMAP_spec() -> (b: bool) {
        true
    }

    // User page tables use only top-level entries 0..256; this exec
    // function mirrors `TOP_LEVEL_INDEX_RANGE_spec` above.
    fn TOP_LEVEL_INDEX_RANGE() -> Range<usize> {
        0..256
    }

    fn TOP_LEVEL_CAN_UNMAP() -> (b: bool) {
        true
    }

    type E = PageTableEntry;

    type C = PagingConsts;

    type Item = MappedItem;

    // The raw form of an item is left uninterpreted; its only verified
    // property is the `item_roundtrip` axiom below.
    uninterp spec fn item_into_raw_spec(item: Self::Item) -> (Paddr, PagingLevel, PageProperty);

    // Outside the verification boundary (`external_body`): consumes the
    // frame, forgetting it into its raw physical address via `into_raw`.
    #[verifier::external_body]
    fn item_into_raw(item: Self::Item) -> (Paddr, PagingLevel, PageProperty) {
        let MappedItem { frame, prop } = item;
        let level = frame.map_level();
        let paddr = frame.into_raw();
        (paddr, level, prop)
    }

    uninterp spec fn item_from_raw_spec(
        paddr: Paddr,
        level: PagingLevel,
        prop: PageProperty,
    ) -> Self::Item;

    // Outside the verification boundary: reconstructs the frame from a raw
    // physical address.
    #[verifier::external_body]
    fn item_from_raw(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self::Item {
        // SAFETY assumption (unverified): `paddr` came from a prior
        // `item_into_raw`, so frame ownership is being restored here.
        let frame = unsafe { UFrame::from_raw(paddr) };
        MappedItem { frame, prop }
    }

    axiom fn axiom_nr_subpage_per_huge_eq_nr_entries();

    // Axiom: converting an item to its raw form and back yields an
    // equivalent item (justifies the SAFETY claim on this impl).
    axiom fn item_roundtrip(item: Self::Item, paddr: Paddr, level: PagingLevel, prop: PageProperty);
}
1303
1304} // verus!