// ostd/mm/vm_space.rs
// SPDX-License-Identifier: MPL-2.0
//! Virtual memory space management.
//!
//! The [`VmSpace`] struct is provided to manage the virtual memory space of a
//! user. Cursors are used to traverse and modify the virtual memory space
//! concurrently. The VM space cursor [`self::Cursor`] is just a wrapper over
//! the page table cursor, providing efficient, powerful concurrent accesses
//! to the page table.
9use alloc::vec::Vec;
10use vstd::atomic::PermissionU64;
11use vstd::pervasive::{arbitrary, proof_from_false};
12use vstd::prelude::*;
13use vstd::simple_pptr::PointsTo;
14use vstd::vpanic;
15
16use crate::specs::mm::virt_mem_newer::{MemView, VirtPtr};
17
18use crate::error::Error;
19use crate::mm::frame::untyped::UFrame;
20use crate::mm::frame::MetaSlot;
21use crate::mm::io::VmIoMemView;
22use crate::mm::kspace::KernelPtConfig;
23use crate::mm::page_table::*;
24use crate::mm::page_table::{EntryOwner, PageTableFrag, PageTableGuard};
25use crate::specs::arch::*;
26use crate::specs::mm::frame::mapping::meta_to_frame;
27use crate::specs::mm::frame::meta_owners::{MetaPerm, MetaSlotStorage, MetadataInnerPerms};
28use crate::specs::mm::frame::meta_region_owners::MetaRegionOwners;
29use crate::specs::mm::page_table::cursor::owners::CursorOwner;
30use crate::specs::mm::page_table::*;
31use crate::specs::mm::tlb::TlbModel;
32use crate::specs::task::InAtomicMode;
33use core::marker::PhantomData;
34use core::{ops::Range, sync::atomic::Ordering};
35use vstd_extra::ghost_tree::*;
36
37use crate::mm::kspace::KERNEL_PAGE_TABLE;
38use crate::mm::tlb::*;
39use crate::specs::mm::cpu::{AtomicCpuSet, CpuSet};
40
41use crate::{
42 mm::{
43 io::{VmIoOwner, VmReader, VmWriter},
44 page_prop::PageProperty,
45 Paddr, PagingConstsTrait, PagingLevel, Vaddr, MAX_USERSPACE_VADDR,
46 },
47 prelude::*,
48};
49
50use alloc::sync::Arc;
51
52#[path = "../../specs/mm/vm_space.rs"]
53pub mod vm_space_specs;
54use vm_space_specs::*;
55
56verus! {
57
/// A virtual address space for user-mode tasks, enabling safe manipulation of user-space memory.
///
/// The [`VmSpace`] type provides memory isolation guarantees between user-space and
/// kernel-space. For example, given an arbitrary user-space pointer, one can read and
/// write the memory location referred to by the user-space pointer without the risk of
/// breaking the memory safety of the kernel space.
///
/// # Task Association Semantics
///
/// As far as OSTD is concerned, a [`VmSpace`] is not necessarily associated with a task. Once a
/// [`VmSpace`] is activated (see [`VmSpace::activate`]), it remains activated until another
/// [`VmSpace`] is activated **possibly by another task running on the same CPU**.
///
/// This means that it's up to the kernel to ensure that a task's [`VmSpace`] is always activated
/// while the task is running. This can be done by using the injected post schedule handler
/// (see [`inject_post_schedule_handler`]) to always activate the correct [`VmSpace`] after each
/// context switch.
///
/// If the kernel otherwise decides not to ensure that the running task's [`VmSpace`] is always
/// activated, the kernel must deal with race conditions when calling methods that require the
/// [`VmSpace`] to be activated, e.g., [`UserMode::execute`], [`VmReader`] and [`VmWriter`].
/// Otherwise, the behavior is unspecified, though it's guaranteed _not_ to compromise the kernel's
/// memory safety.
///
/// # Memory Backing
///
/// A newly-created [`VmSpace`] is not backed by any physical memory pages. To
/// provide memory pages for a [`VmSpace`], one can allocate and map physical
/// memory ([`UFrame`]s) to the [`VmSpace`] using the cursor.
///
/// A [`VmSpace`] can also attach a page fault handler, which will be invoked to
/// handle page faults generated from user space.
///
/// [`inject_post_schedule_handler`]: crate::task::inject_post_schedule_handler
/// [`UserMode::execute`]: crate::user::UserMode::execute
/// [`VmWriter`]: crate::mm::io::VmWriter
/// [`VmReader`]: crate::mm::io::VmReader
///
/// # Verification Design
///
/// A [`VmSpace`] has a corresponding [`VmSpaceOwner`] object that is used to track its state,
/// and against which its invariants are stated. The [`VmSpaceOwner`] catalogues the readers and writers
/// that are associated with the [`VmSpace`], and the [`MemView`] which encodes the active page table and
/// the subset of the TLB that covers the same virtual address space.
/// All proofs about the correctness of the readers and writers are founded on the well-formedness of the [`MemView`]:
///
/// ```rust
/// open spec fn mem_view_wf(self) -> bool {
///     &&& self.mem_view is Some <==> self.mv_range@ is Some
///     // This requires that TotalMapping (mvv) = mv ∪ writer mappings ∪ reader mappings
///     &&& self.mem_view matches Some(remaining_view)
///         ==> self.mv_range@ matches Some(total_view)
///         ==> {
///             &&& remaining_view.mappings_are_disjoint()
///             &&& remaining_view.mappings.finite()
///             &&& total_view.mappings_are_disjoint()
///             &&& total_view.mappings.finite()
///             // ======================
///             // Remaining Consistency
///             // ======================
///             &&& remaining_view.mappings.subset_of(total_view.mappings)
///             &&& remaining_view.memory.dom().subset_of(
///                 total_view.memory.dom(),
///             )
///             // =====================
///             // Total View Consistency
///             // =====================
///             &&& forall|va: usize|
///                 remaining_view.addr_transl(va) == total_view.addr_transl(
///                     va,
///                 )
///             // =====================
///             // Writer correctness
///             // =====================
///             &&& forall|i: int|
///                 0 <= i < self.writers.len() as int ==> {
///                     &&& self.writers[i].inv()
///                 }
///         }
/// }
/// ```
pub struct VmSpace<'a> {
    // The user-mode page table wrapped by this address space.
    pub pt: PageTable<UserPtConfig>,
    // CPU set handed to the TLB flushers created by `cursor_mut`; starts
    // empty when the space is created by `new`.
    pub cpus: AtomicCpuSet,
    // Carries the `'a` lifetime parameter without storing any data.
    pub _marker: PhantomData<&'a ()>,
}
144
/// File-local result alias pinning the error type to the OSTD [`Error`].
type Result<A> = core::result::Result<A, Error>;
146
147#[verus_verify]
148impl<'a> VmSpace<'a> {
149 /// Creates a new VM address space.
150 ///
151 /// This allocates a new user page table by duplicating the kernel page
152 /// table's top-level entries, and returns a [`VmSpace`] that wraps it.
153 ///
154 /// # Verified Properties
155 /// ## Preconditions
156 /// - **Safety Invariants**: The meta-region invariants must hold.
157 /// ## Postconditions
158 /// - The returned [`VmSpace`] instance satisfies the invariants of [`VmSpace`].
159 #[inline]
160 #[verus_spec(r =>
161 with Tracked(regions): Tracked<&mut MetaRegionOwners>,
162 Tracked(guards_k): Tracked<&mut Guards<'static, KernelPtConfig>>,
163 Tracked(guards_u): Tracked<&mut Guards<'static, UserPtConfig>>,
164 requires
165 old(regions).inv(),
166 )]
167 pub fn new() -> Self {
168 proof_decl! {
169 let tracked mut kernel_owner_opt: Option<&PageTableOwner<KernelPtConfig>> = None;
170 }
171 let kpt = crate::mm::kspace::kvirt_area::get_kernel_page_table(
172 Tracked(&mut kernel_owner_opt), Tracked(regions), Tracked(guards_k),
173 );
174 proof_decl! {
175 let tracked kernel_owner = kernel_owner_opt.tracked_take();
176 }
177 let pt = {
178 #[verus_spec(with Tracked(kernel_owner), Tracked(regions), Tracked(guards_k), Tracked(guards_u))]
179 kpt.create_user_page_table::<crate::specs::task::AnyAtomicGuard>()
180 };
181 Self { pt, cpus: AtomicCpuSet::new(CpuSet::new_empty()), _marker: PhantomData }
182 }
183
184 /// Gets an immutable cursor in the virtual address range.
185 ///
186 /// The cursor behaves like a lock guard, exclusively owning a sub-tree of
187 /// the page table, preventing others from creating a cursor in it. So be
188 /// sure to drop the cursor as soon as possible.
189 ///
190 /// The creation of the cursor may block if another cursor having an
191 /// overlapping range is alive.
192 ///
193 /// # Verified Properties
194 /// ## Preconditions
195 /// - **Safety Invariants**: The page table owner must be valid.
196 /// ## Postconditions
197 /// - When the virtual address range satisfies
198 /// [`cursor_new_success_conditions`](crate::mm::page_table::Cursor::cursor_new_success_conditions),
199 /// the result is `Ok` and a [`CursorOwner`] is returned.
200 #[verus_spec(r =>
201 with Tracked(owner): Tracked<PageTableOwner<UserPtConfig>>,
202 Tracked(guard_perm): Tracked<GuardPerm<'a, UserPtConfig>>,
203 Tracked(regions): Tracked<&mut MetaRegionOwners>,
204 Tracked(guards): Tracked<&mut Guards<'a, UserPtConfig>>
205 -> cursor_owner: Tracked<Option<CursorOwner<'a, UserPtConfig>>>
206 requires
207 owner.inv(),
208 ensures
209 crate::mm::page_table::Cursor::<UserPtConfig, G>::cursor_new_success_conditions(va) ==> (r matches Ok(_) && cursor_owner@ matches Some(_)),
210 )]
211 pub fn cursor<G: InAtomicMode>(&'a self, guard: &'a G, va: &Range<Vaddr>) -> Result<
212 Cursor<'a, G>,
213 > {
214 proof_decl! {
215 let tracked mut out_owner: Option<CursorOwner<'a, UserPtConfig>>;
216 }
217 match {
218 #[verus_spec(with Tracked(owner), Tracked(guard_perm), Tracked(regions), Tracked(guards))]
219 self.pt.cursor(guard, va)
220 } {
221 Ok((pt_cursor, tracked_owner)) => {
222 proof! { out_owner = Some::<CursorOwner<'a, UserPtConfig>>(tracked_owner.get()); }
223 proof_with!(|= Tracked(out_owner));
224 Ok(Cursor(pt_cursor))
225 },
226 Err(e) => {
227 proof! { out_owner = None; }
228 proof_with!(|= Tracked(out_owner));
229 Err(Error::AccessDenied)
230 },
231 }
232 }
233
234 /// Gets a mutable cursor in the virtual address range.
235 ///
236 /// The same as [`Self::cursor`], the cursor behaves like a lock guard,
237 /// exclusively owning a sub-tree of the page table, preventing others
238 /// from creating a cursor in it. So be sure to drop the cursor as soon as
239 /// possible.
240 ///
241 /// The creation of the cursor may block if another cursor having an
242 /// overlapping range is alive. The modification to the mapping by the
243 /// cursor may also block or be overridden by the mapping of another cursor.
244 ///
245 /// # Verified Properties
246 /// ## Preconditions
247 /// - **Safety Invariants**: The page table owner must be valid.
248 /// ## Postconditions
249 /// - When the virtual address range satisfies
250 /// [`cursor_new_success_conditions`](crate::mm::page_table::Cursor::cursor_new_success_conditions),
251 /// the result is `Ok` and a [`CursorOwner`] is returned.
252 #[verus_spec(r =>
253 with Tracked(owner): Tracked<PageTableOwner<UserPtConfig>>,
254 Tracked(guard_perm): Tracked<GuardPerm<'a, UserPtConfig>>,
255 Tracked(regions): Tracked<&mut MetaRegionOwners>,
256 Tracked(guards): Tracked<&mut Guards<'a, UserPtConfig>>
257 -> cursor_owner: Tracked<Option<CursorOwner<'a, UserPtConfig>>>
258 requires
259 ensures
260 crate::mm::page_table::Cursor::<UserPtConfig, G>::cursor_new_success_conditions(va) ==> (r matches Ok(_) && cursor_owner@ matches Some(_)),
261 )]
262 pub fn cursor_mut<G: InAtomicMode>(&'a self, guard: &'a G, va: &Range<Vaddr>) -> Result<
263 CursorMut<'a, G>,
264 > {
265 proof_decl! {
266 let tracked mut out_owner: Option<CursorOwner<'a, UserPtConfig>>;
267 }
268 match {
269 #[verus_spec(with Tracked(owner), Tracked(guard_perm), Tracked(regions), Tracked(guards))]
270 self.pt.cursor_mut(guard, va)
271 } {
272 Ok((pt_cursor, tracked_owner)) => {
273 proof! { out_owner = Some::<CursorOwner<'a, UserPtConfig>>(tracked_owner.get()); }
274 proof_with!(|= Tracked(out_owner));
275 Ok(CursorMut { pt_cursor, flusher: TlbFlusher::new(&self.cpus) })
276 },
277 Err(e) => {
278 proof! { out_owner = None; }
279 proof_with!(|= Tracked(out_owner));
280 Err(Error::AccessDenied)
281 },
282 }
283 }
284
285 /// Creates a reader to read data from the user space of the current task.
286 ///
287 /// Returns [`Err`] if this [`VmSpace`] is not the user space of the current task
288 /// or the `vaddr` and `len` do not represent a valid user space memory range.
289 ///
290 /// # Verified Properties
291 /// ## Preconditions
292 /// - The [`VmSpaceOwner`] invariant must hold.
293 /// ## Postconditions
294 /// - When [`Self::reader_success_cond`] holds, the result is `Ok`.
295 /// - On success, the [`VmReader`] and its [`VmIoOwner`] are well-formed with no memory view.
296 /// ## Safety
297 /// - The function does not interact with the lower-level memory system directly.
298 /// By checking that the target (user) page table is not the active (kernel) one,
299 /// we ensure that the resulting reader cannot interact with kernel memory.
300 #[inline]
301 #[verus_spec(r =>
302 with
303 Tracked(owner): Tracked<&'a mut VmSpaceOwner<'a>>,
304 -> reader_owner: Tracked<Option<VmIoOwner<'a>>>,
305 requires
306 old(owner).inv(),
307 ensures
308 final(owner).inv(),
309 self.reader_success_cond(vaddr, len) ==> r is Ok && reader_owner@ is Some,
310 r is Ok && reader_owner@ is Some ==> {
311 &&& r.unwrap().wf(reader_owner@.unwrap())
312 &&& reader_owner@.unwrap().mem_view is None
313 }
314 )]
315 pub fn reader(&self, vaddr: Vaddr, len: usize) -> Result<VmReader<'a>> {
316 if current_page_table_paddr() != self.pt.root_paddr() {
317 proof_with!(|= Tracked(None));
318 Err(Error::AccessDenied)
319 } else if vaddr.checked_add(len).unwrap_or(usize::MAX) > MAX_USERSPACE_VADDR {
320 proof_with!(|= Tracked(None));
321 Err(Error::AccessDenied)
322 } else {
323 let ghost id = owner.new_vm_io_id();
324 proof_decl! {
325 let tracked mut vm_reader_owner: VmIoOwner<'a>;
326 }
327
328 // SAFETY: The memory range is in user space, as checked above.
329 let reader = unsafe {
330 proof_with!(Ghost(id) => Tracked(vm_reader_owner));
331 VmReader::from_user_space(VirtPtr::from_vaddr(vaddr, len), len)
332 };
333
334 proof_with!(|= Tracked(Some(vm_reader_owner)));
335 Ok(reader)
336 }
337 }
338
339 /// Creates a writer to write data to the user space of the current task.
340 ///
341 /// Returns [`Err`] if this [`VmSpace`] is not the user space of the current task
342 /// or the `vaddr` and `len` do not represent a valid user space memory range.
343 ///
344 /// # Verified Properties
345 /// ## Preconditions
346 /// - The [`VmSpaceOwner`] invariant must hold.
347 /// ## Postconditions
348 /// - When [`Self::writer_success_cond`] holds, the result is `Ok`.
349 /// - On success, the [`VmWriter`] and its [`VmIoOwner`] are well-formed with no memory view.
350 /// ## Safety
351 /// - The function does not interact with the lower-level memory system directly.
352 /// By checking that the target (user) page table is not the active (kernel) one,
353 /// we ensure that the resulting writer cannot interact with kernel memory.
354 #[inline]
355 #[verus_spec(r =>
356 with
357 Tracked(owner): Tracked<&mut VmSpaceOwner<'a>>,
358 -> writer_owner: Tracked<Option<VmIoOwner<'a>>>,
359 requires
360 old(owner).inv(),
361 ensures
362 final(owner).inv(),
363 self.writer_success_cond(vaddr, len) ==> r is Ok && writer_owner@ is Some,
364 r is Ok && writer_owner@ is Some ==> {
365 &&& r.unwrap().wf(writer_owner@.unwrap())
366 &&& writer_owner@.unwrap().mem_view is None
367 }
368 )]
369 pub fn writer(self, vaddr: Vaddr, len: usize) -> Result<VmWriter<'a>> {
370 if current_page_table_paddr() != self.pt.root_paddr() {
371 proof_with!(|= Tracked(None));
372 Err(Error::AccessDenied)
373 } else if vaddr.checked_add(len).unwrap_or(usize::MAX) > MAX_USERSPACE_VADDR {
374 proof_with!(|= Tracked(None));
375 Err(Error::AccessDenied)
376 } else {
377 let ghost id = owner.new_vm_io_id();
378 proof_decl! {
379 let tracked mut vm_writer_owner: VmIoOwner<'a>;
380 }
381
382 // SAFETY: The memory range is in user space, as checked above.
383 let reader = unsafe {
384 proof_with!(Ghost(id) => Tracked(vm_writer_owner));
385 VmWriter::from_user_space(VirtPtr::from_vaddr(vaddr, len), len)
386 };
387
388 proof_with!(|= Tracked(Some(vm_writer_owner)));
389 Ok(reader)
390 }
391 }
392}
393
/// The cursor for querying over the VM space without modifying it.
///
/// It exclusively owns a sub-tree of the page table, preventing others from
/// reading or modifying the same sub-tree. Two read-only cursors can not be
/// created from the same virtual address range either.
///
/// This is a thin newtype over the page-table cursor, specialized to
/// [`UserPtConfig`].
pub struct Cursor<'a, A: InAtomicMode>(pub crate::mm::page_table::Cursor<'a, UserPtConfig, A>);
400
#[verus_verify]
impl<'rcu, A: InAtomicMode> Cursor<'rcu, A> {
    /// Queries the mapping at the current virtual address.
    ///
    /// If the cursor is pointing to a valid virtual address that is locked,
    /// it will return the virtual address range and the mapped item.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - All system invariants must hold
    /// - **Liveness**: The function will return an error if the cursor is not within the locked range
    /// ## Postconditions
    /// - If there is a mapped item at the current virtual address ([`query_some_condition`]),
    ///   it is returned along with the virtual address range that it maps ([`query_success_ensures`]).
    /// - The mapping that is returned corresponds to the abstract mapping given by [`query_item_spec`](CursorView::query_item_spec).
    /// - If there is no mapped item at the current virtual address ([`query_none_condition`]),
    ///   it returns [`None`], and the virtual address range of the cursor's current position.
    /// ## Safety
    /// - This function preserves all memory invariants.
    /// - The locking mechanism prevents data races.
    #[verus_spec(r =>
        with Tracked(owner): Tracked<&mut CursorOwner<'rcu, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'rcu, UserPtConfig>>
        requires
            old(self).0.invariants(*old(owner), *old(regions), *old(guards)),
            old(owner).in_locked_range(),
            // Non-panic precondition (ref-count non-saturation) propagated from
            // Cursor::query.
            forall |i: usize|
                #![trigger old(regions).slot_owners[i]]
                old(regions).slot_owners.contains_key(i)
                && old(regions).slot_owners[i].inner_perms.ref_count.value()
                    != crate::specs::mm::frame::meta_owners::REF_COUNT_UNUSED
                ==> old(regions).slot_owners[i].inner_perms.ref_count.value() + 1
                    < crate::specs::mm::frame::meta_owners::REF_COUNT_MAX,
        ensures
            final(self).0.invariants(*final(owner), *final(regions), *final(guards)),
            final(self).0.query_some_condition(*final(owner)) ==> {
                &&& r is Ok
                &&& final(self).0.query_some_ensures(*final(owner), r.unwrap())
            },
            !final(self).0.query_some_condition(*final(owner)) ==> {
                &&& r is Ok
                &&& final(self).0.query_none_ensures(*final(owner), r.unwrap())
            },
            old(owner)@.mappings == final(owner)@.mappings,
            forall |e: EntryOwner<UserPtConfig>| #[trigger] e.inv() && e.metaregion_sound(*old(regions)) ==> e.metaregion_sound(*final(regions)),
    )]
    pub fn query(&mut self) -> Result<(Range<Vaddr>, Option<MappedItem>)> {
        // Delegates to the page-table cursor, threading the tracked state
        // through unchanged; `?` propagates its error.
        Ok(
            #[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
            self.0.query()?,
        )
    }

    /// Moves the cursor forward to the next mapped virtual address.
    ///
    /// If there is mapped virtual address following the current address within
    /// next `len` bytes, it will return that mapped address. In this case,
    /// the cursor will stop at the mapped address.
    ///
    /// Otherwise, it will return `None`. And the cursor may stop at any
    /// address after `len` bytes.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - **Safety Invariants**: The page table cursor safety invariants
    ///   ([crate::mm::page_table::Cursor::invariants]) must hold before the call.
    /// - **Liveness**: In order to avoid a panic, the length must be page-aligned and less than or equal to the remaining range of the cursor.
    /// ## Postconditions
    /// - **Safety Invariants**: Page table cursor safety invariants are preserved.
    /// - **Correctness**: If there is a mapped address after the current address within the next `len` bytes,
    ///   it will move the cursor to the next mapped address and return it.
    /// - **Correctness**: If the metadata region was well-formed before the call, it will be well-formed after.
    /// ## Panics
    /// This method panics if the length is longer than the remaining range of the cursor.
    /// ## Safety
    /// This function preserves all memory invariants.
    /// Because it panics rather than move the cursor to an invalid address,
    /// it ensures that the cursor is safe to use after the call.
    #[verus_spec(res =>
        with Tracked(owner): Tracked<&mut CursorOwner<'rcu, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'rcu, UserPtConfig>>
        requires
            old(self).0.invariants(*old(owner), *old(regions), *old(guards)),
        ensures
            // NOTE(review): this panic-freedom fact is stated as an `ensures`
            // (i.e., it holds on normal return) — confirm it is not meant to
            // be a `requires` clause instead.
            !old(self).0.find_next_panic_condition(len),
            final(self).0.invariants(*final(owner), *final(regions), *final(guards)),
            res is Some ==> {
                &&& res.unwrap() == final(self).0.va
                &&& final(owner).level < final(owner).guard_level
                &&& final(owner).in_locked_range()
            },
    )]
    pub fn find_next(&mut self, len: usize) -> (res: Option<Vaddr>) {
        // Thin delegation to the page-table cursor.
        #[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
        self.0.find_next(len)
    }

    /// Jump to the virtual address.
    ///
    /// This function will move the cursor to the given virtual address.
    /// If the target address is not in the locked range, it will return an error.
    /// # Verified Properties
    /// ## Preconditions
    /// - **Safety Invariants**: The page table cursor safety invariants
    ///   ([crate::mm::page_table::Cursor::invariants]) must hold before the call.
    /// - **Liveness**: The function will panic if the target `va` is not aligned
    ///   to the base page size.
    /// ## Postconditions
    /// - **Safety Invariants**: Page table cursor safety invariants are preserved.
    /// - **Correctness**: If the target `va` is within the cursor's locked range,
    ///   the result will be `Ok` and the cursor's virtual address will be set to `va`.
    /// - **Correctness**: If the target `va` is outside the locked range, the result is `Err`.
    /// - **Correctness**: If the metadata region was well-formed before the call, it will be well-formed after.
    /// ## Panics
    /// This method panics if the target address is not aligned to the page size.
    /// ## Safety
    /// This function preserves all memory invariants.
    /// Because it throws an error rather than move the cursor to an invalid address,
    /// it ensures that the cursor is safe to use after the call.
    /// The locking mechanism prevents data races.
    #[verus_spec(res =>
        with Tracked(owner): Tracked<&mut CursorOwner<'rcu, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'rcu, UserPtConfig>>
        requires
            old(self).0.invariants(*old(owner), *old(regions), *old(guards)),
            old(owner).in_locked_range(),
        ensures
            !old(self).0.jump_panic_condition(va),
            final(self).0.invariants(*final(owner), *final(regions), *final(guards)),
            final(self).0.barrier_va.start <= va < final(self).0.barrier_va.end ==> {
                &&& res is Ok
                &&& final(self).0.va == va
            },
            !(final(self).0.barrier_va.start <= va < final(self).0.barrier_va.end) ==> res is Err,
    )]
    pub fn jump(&mut self, va: Vaddr) -> Result<()> {
        // Delegate, then discard the inner `Ok` payload.
        (#[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
        self.0.jump(va))?;
        Ok(())
    }

    /// Get the virtual address of the current slot.
    pub fn virt_addr(&self) -> Vaddr
        returns
            self.0.va,
    {
        self.0.virt_addr()
    }
}
553
/// The cursor for modifying the mappings in VM space.
///
/// It exclusively owns a sub-tree of the page table, preventing others from
/// reading or modifying the same sub-tree.
pub struct CursorMut<'a, A: InAtomicMode> {
    // The underlying mutable page-table cursor this wrapper delegates to.
    pub pt_cursor: crate::mm::page_table::CursorMut<'a, UserPtConfig, A>,
    // We have a read lock so the CPU set in the flusher is always a superset
    // of actual activated CPUs.
    pub flusher: TlbFlusher<'a /*, DisabledPreemptGuard*/ >,
}
564
565#[verus_verify]
566impl<'a, A: InAtomicMode> CursorMut<'a, A> {
    /// Queries the mapping at the current virtual address.
    ///
    /// This is the same as [`Cursor::query`].
    ///
    /// If the cursor is pointing to a valid virtual address that is locked,
    /// it will return the virtual address range and the mapped item.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - **Safety Invariants**: The page table cursor safety invariants
    ///   ([crate::mm::page_table::Cursor::invariants]) must hold before the call.
    /// ## Postconditions
    /// - **Safety Invariants**: Page table cursor safety invariants are preserved.
    /// - **Correctness**: If the cursor is within the locked range, the result is `Ok`.
    /// - **Correctness**: If there is a mapped item at the current virtual address ([`query_some_condition`]),
    ///   it is returned along with the virtual address range that it maps ([`query_success_ensures`]).
    /// - **Correctness**: The mapping that is returned corresponds to the abstract mapping given by [`query_item_spec`](CursorView::query_item_spec).
    /// - **Correctness**: If there is no mapped item at the current virtual address ([`query_none_condition`]),
    ///   it returns `None`, and the virtual address range of the cursor's current position.
    /// - **Correctness**: If the metadata region was well-formed before the call, it will be well-formed after.
    /// - **Safety**: The mappings in the page table are not affected.
    /// - **Safety**: The soundness of individual entries are not affected.
    /// ## Safety
    /// - This function preserves all memory invariants.
    /// - The locking mechanism prevents data races.
    #[verus_spec(res =>
        with Tracked(owner): Tracked<&mut CursorOwner<'a, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'a, UserPtConfig>>
        requires
            old(self).pt_cursor.inner.invariants(*old(owner), *old(regions), *old(guards)),
            old(owner).in_locked_range(),
            // Non-panic precondition (ref-count non-saturation) propagated from
            // Cursor::query.
            forall |i: usize|
                #![trigger old(regions).slot_owners[i]]
                old(regions).slot_owners.contains_key(i)
                && old(regions).slot_owners[i].inner_perms.ref_count.value()
                    != crate::specs::mm::frame::meta_owners::REF_COUNT_UNUSED
                ==> old(regions).slot_owners[i].inner_perms.ref_count.value() + 1
                    < crate::specs::mm::frame::meta_owners::REF_COUNT_MAX,
        ensures
            final(self).pt_cursor.inner.invariants(*final(owner), *final(regions), *final(guards)),
            old(owner).in_locked_range() ==> res is Ok,
            res matches Ok(state) ==>
                final(self).pt_cursor.inner.query_some_condition(*final(owner)) ==>
                    final(self).pt_cursor.inner.query_some_ensures(*final(owner), state),
            res matches Ok(state) ==>
                !final(self).pt_cursor.inner.query_some_condition(*final(owner)) ==>
                    final(self).pt_cursor.inner.query_none_ensures(*final(owner), state),
            old(owner)@.mappings == final(owner)@.mappings,
            forall |e: EntryOwner<UserPtConfig>| #[trigger] e.inv() && e.metaregion_sound(*old(regions)) ==> e.metaregion_sound(*final(regions)),
    )]
    pub fn query(&mut self) -> Result<(Range<Vaddr>, Option<MappedItem>)> {
        // Delegates to the inner mutable page-table cursor; the tracked
        // owner/region/guard state is threaded through verbatim.
        Ok(
            #[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
            self.pt_cursor.query()?,
        )
    }
624
    /// Moves the cursor forward to the next mapped virtual address.
    ///
    /// This is the same as [`Cursor::find_next`].
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - **Safety Invariants**: The page table cursor safety invariants
    ///   ([crate::mm::page_table::Cursor::invariants]) must hold before the call.
    /// - **Liveness**: In order to avoid a panic, the length must be page-aligned and less than or equal to the remaining range of the cursor.
    /// ## Postconditions
    /// - **Safety Invariants**: Page table cursor safety invariants are preserved.
    /// - **Correctness**: If there is a mapped address after the current address within the next `len` bytes,
    ///   it will move the cursor to the next mapped address and return it.
    /// - **Correctness**: If the metadata region was well-formed before the call, it will be well-formed after.
    /// ## Panics
    /// This method panics if the length is longer than the remaining range of the cursor.
    /// ## Safety
    /// This function preserves all memory invariants.
    /// Because it panics rather than move the cursor to an invalid address,
    /// it ensures that the cursor is safe to use after the call.
    // NOTE(review): unlike `Cursor::find_next`, the `requires`/`ensures`
    // here live in the fn signature rather than the `verus_spec` attribute —
    // presumably equivalent, but confirm this mixed style is intended.
    #[verus_spec(res =>
        with Tracked(owner): Tracked<&mut CursorOwner<'a, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'a, UserPtConfig>>
    )]
    pub fn find_next(&mut self, len: usize) -> (res: Option<Vaddr>)
        requires
            old(self).pt_cursor.inner.invariants(*old(owner), *old(regions), *old(guards)),
        ensures
            !old(self).pt_cursor.inner.find_next_panic_condition(len),
            final(self).pt_cursor.inner.invariants(*final(owner), *final(regions), *final(guards)),
            res is Some ==> {
                &&& res.unwrap() == final(self).pt_cursor.inner.va
                &&& final(owner).level < final(owner).guard_level
                &&& final(owner).in_locked_range()
            },
    {
        // Thin delegation to the inner cursor.
        #[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
        self.pt_cursor.find_next(len)
    }
665
    /// Jump to the virtual address.
    ///
    /// This is the same as [`Cursor::jump`].
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - **Safety Invariants**: The page table cursor safety invariants
    ///   ([crate::mm::page_table::Cursor::invariants]) must hold before the call.
    /// - **Liveness**: The function will panic if the target `va` is not aligned
    ///   to the base page size.
    /// ## Postconditions
    /// - **Safety Invariants**: Page table cursor safety invariants are preserved.
    /// - **Correctness**: If the target `va` is within the cursor's locked range,
    ///   the result will be `Ok` and the cursor's virtual address will be set to `va`.
    /// - **Correctness**: If the target `va` is outside the locked range, the result is `Err`.
    /// - **Correctness**: If the metadata region was well-formed before the call, it will be well-formed after.
    /// ## Panics
    /// This method panics if the target address is not aligned to the page size.
    /// ## Safety
    /// This function preserves all memory invariants.
    /// Because it throws an error rather than move the cursor to an invalid address,
    /// it ensures that the cursor is safe to use after the call.
    /// The locking mechanism prevents data races.
    #[verus_spec(res =>
        with
            Tracked(owner): Tracked<&mut CursorOwner<'a, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'a, UserPtConfig>>
        requires
            old(self).pt_cursor.inner.invariants(*old(owner), *old(regions), *old(guards)),
            old(owner).in_locked_range(),
        ensures
            !old(self).pt_cursor.inner.jump_panic_condition(va),
            final(self).pt_cursor.inner.invariants(*final(owner), *final(regions), *final(guards)),
            final(self).pt_cursor.inner.barrier_va.start <= va < final(self).pt_cursor.inner.barrier_va.end ==> {
                &&& res is Ok
                &&& final(self).pt_cursor.inner.va == va
            },
            !(final(self).pt_cursor.inner.barrier_va.start <= va < final(self).pt_cursor.inner.barrier_va.end) ==> res is Err,
    )]
    pub fn jump(&mut self, va: Vaddr) -> Result<()> {
        // Delegate, then discard the inner `Ok` payload.
        (#[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
        self.pt_cursor.jump(va))?;
        Ok(())
    }
711
    /// Get the virtual address of the current slot.
    ///
    /// The `returns` clause ties the result to the inner cursor's
    /// specification-level `va` field.
    #[verus_spec(r =>
        returns
            self.pt_cursor.inner.va,
    )]
    pub fn virt_addr(&self) -> Vaddr {
        self.pt_cursor.virt_addr()
    }
720
    /// Get the dedicated TLB flusher for this cursor.
    ///
    /// Returns a mutable borrow of the cursor's own [`TlbFlusher`] so callers
    /// can decide when and how to issue/dispatch TLB flushes themselves.
    #[verus_spec(ret =>
        ensures
            *ret == old(self).flusher,
            *final(ret) == final(self).flusher,
    )]
    pub fn flusher(&mut self) -> &mut TlbFlusher<'a> {
        &mut self.flusher
    }
730
731 /// Map a frame into the current slot.
732 ///
733 /// This method will bring the cursor to the next slot after the modification.
734 /// If there is an existing mapping at the current slot, it will be replaced
735 /// and the TLB will be flushed for that entry.
736 /// # Verified Properties
737 /// ## Preconditions
738 /// - **Safety Invariants**: The page table cursor safety invariants
739 /// ([`invariants`](crate::mm::page_table::Cursor::invariants)) and the TLB invariant
740 /// ([`TlbModel::inv`]) must hold before the call.
741 /// - **Liveness**: The cursor must not be past the end of its locked range,
742 /// and the frame's level must fit within the remaining range, or the function will panic.
743 /// - **Bookkeeping**: The frame must be well-formed with respect to its entry owner
744 /// ([`item_wf`](Self::item_wf)).
745 /// ## Postconditions
746 /// - **Safety Invariants**: Page table cursor safety invariants are preserved.
747 /// - **Correctness**: The page table view is updated with the new mapping
748 /// according to [`map_item_ensures`](Self::map_item_ensures).
749 /// - **Correctness**: If the metadata region was well-formed before the call
750 /// and the frame was not already mapped, it will be well-formed after.
751 /// ## Safety
752 /// - For soundness purposes, it doesn't matter if a frame is mapped multiple times
753 /// in the same page table. There is still a clear definition of the behavior.
    #[verus_spec(
        with Tracked(cursor_owner): Tracked<&mut CursorOwner<'a, UserPtConfig>>,
            Tracked(entry_owner): Tracked<EntryOwner<UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'a, UserPtConfig>>,
            Tracked(tlb_model): Tracked<&mut TlbModel>
        requires
            old(tlb_model).inv(),
            old(self).pt_cursor.inner.invariants(*old(cursor_owner), *old(regions), *old(guards)),
            old(cursor_owner).in_locked_range(),
            old(self).item_wf(frame, prop, entry_owner, *old(regions)),
        ensures
            !old(self).pt_cursor.map_panic_conditions(MappedItem { frame: frame, prop: prop }),
            final(self).pt_cursor.inner.invariants(*final(cursor_owner), *final(regions), *final(guards)),
            old(self).map_item_ensures(
                frame,
                prop,
                old(self).pt_cursor.inner.model(*old(cursor_owner)),
                final(self).pt_cursor.inner.model(*final(cursor_owner)),
            ),
    )]
    pub fn map(&mut self, frame: UFrame, prop: PageProperty) {
        // Record the slot's VA before the cursor is advanced by `map`.
        let start_va = self.virt_addr();
        let item = MappedItem { frame: frame, prop: prop };

        assert(self.pt_cursor.item_wf(item, entry_owner)) by {};

        // SAFETY: It is safe to map untyped memory into the userspace.
        //
        // `Ok(())` from the inner `map` means the slot was previously empty;
        // `Err(frag)` returns the fragment that used to occupy the slot.
        let Err(frag) = (
            #[verus_spec(with Tracked(cursor_owner), Tracked(entry_owner), Tracked(regions), Tracked(guards))]
            self.pt_cursor.map(item)) else {
            return ; // No mapping exists at the current address.
        };

        match frag {
            // An old frame was replaced: flush its TLB entry so stale
            // translations cannot be used to access the recycled frame.
            PageTableFrag::Mapped { va, item } => {
                //debug_assert_eq!(va, start_va);
                let old_frame = item.frame;

                #[verus_spec(with Tracked(tlb_model))]
                self.flusher.issue_tlb_flush_with(TlbFlushOp::Address(start_va), old_frame.into());
                #[verus_spec(with Tracked(tlb_model))]
                self.flusher.dispatch_tlb_flush();
            },
            // Unreachable: a `UFrame` maps at level 1 (base page size), so the
            // inner `map` can never evict a whole child page table here. The
            // proof below establishes the contradiction from `item_into_raw`.
            PageTableFrag::StrayPageTable { .. } => {
                assert(false) by {
                    assert(UserPtConfig::item_into_raw(item).1 == 1);
                };
                #[cfg(feature = "allow_panic")]
                vpanic!("`UFrame` is base page sized but re-mapping out a child PT");
            },
        }
    }
807
808 /// Clears the mapping starting from the current slot,
809 /// and returns the number of unmapped pages.
810 ///
811 /// This method will bring the cursor forward by `len` bytes in the virtual
812 /// address space after the modification.
813 ///
814 /// Already-absent mappings encountered by the cursor will be skipped. It
815 /// is valid to unmap a range that is not mapped.
816 ///
817 /// It must issue and dispatch a TLB flush after the operation. Otherwise,
818 /// the memory safety will be compromised. Please call this function less
819 /// to avoid the overhead of TLB flush. Using a large `len` is wiser than
820 /// splitting the operation into multiple small ones.
821 ///
822 /// # Verified Properties
823 /// ## Preconditions
824 /// - **Safety Invariants**: The page table cursor safety invariants
825 /// ([crate::mm::page_table::Cursor::invariants]) must hold before the call.
826 /// - **Safety Invariants**: The TLB invariant ([TlbModel::inv]) must hold.
827 /// - **Liveness**: In order to avoid a panic, the length must be page-aligned and less than or equal to the remaining range of the cursor.
828 /// ## Postconditions
829 /// - **Safety Invariants**: The page table cursor safety invariants are preserved.
830 /// - **Safety Invariants**: The TLB invariant is preserved.
831 /// - **Correctness**: Unmaps a range of virtual addresses from the current address up to `len` bytes
832 /// and returns the number of mappings that were removed.
833 /// - **Correctness**: If the metadata region was well-formed before the call, it will be well-formed after.
834 /// ## Panics
835 /// Panics if:
836 /// - the length is longer than the remaining range of the cursor;
837 /// - the length is not page-aligned.
838 /// ## Safety
839 /// - It is always sound to unmap pages. We flush unmapped pages from the TLB to ensure consistency.
840 /// - TODO: formalizing and proving that this function preserves TLB consistency would
841 /// be pretty straightforward and would be a nice addition to the correctness properties.
    #[verus_spec(r =>
        with Tracked(cursor_owner): Tracked<&mut CursorOwner<'a, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'a, UserPtConfig>>,
            Tracked(tlb_model): Tracked<&mut TlbModel>
        requires
            old(self).pt_cursor.inner.invariants(*old(cursor_owner), *old(regions), *old(guards)),
            old(tlb_model).inv(),
        ensures
            !old(self).pt_cursor.inner.find_next_panic_condition(len),
            final(self).pt_cursor.inner.invariants(*final(cursor_owner), *final(regions), *final(guards)),
            old(self).pt_cursor.inner.model(*old(cursor_owner)).unmap_spec(len, final(self).pt_cursor.inner.model(*final(cursor_owner)), r),
            final(tlb_model).inv(),
    )]
    pub fn unmap(&mut self, len: usize) -> usize {
        // Proof strategy overview: the loop repeatedly calls `take_next`,
        // which removes either one mapped page (`Mapped`) or a whole child
        // page table (`StrayPageTable`). Two ghost sets drive the invariants:
        // `adjusted_base` (the initial mapping set, adjusted as huge pages
        // are split at range boundaries) and `removed` (mappings taken so
        // far), with `cursor_owner@.mappings == adjusted_base \ removed`.
        proof {
            cursor_owner.va.reflect_prop(self.pt_cursor.inner.va);
            cursor_owner.view_preserves_inv();
        }

        vstd_extra::assert_eq!(len % PAGE_SIZE, 0);

        //*** KNOWN BUG: `self.virt_addr() + len` could overflow. For now, assume that it doesn't. ***
        assume(self.pt_cursor.inner.va + len <= usize::MAX);

        vstd_extra::assert!(self.virt_addr() + len <= self.pt_cursor.inner.barrier_va.end);

        assert(!self.pt_cursor.inner.find_next_panic_condition(len));
        assert(!old(self).pt_cursor.inner.find_next_panic_condition(len));

        let end_va = self.virt_addr() + len;
        let mut num_unmapped: usize = 0;

        let ghost start_va: Vaddr = cursor_owner@.cur_va;
        // The "adjusted base" accumulates splits: starts as the split-at-boundaries
        // version of start_mappings and gets updated when take_next splits huge pages.
        let ghost mut adjusted_base: Set<Mapping> = cursor_owner@.mappings;
        // Track the set of removed mappings explicitly (not as a VA range filter).
        let ghost mut removed: Set<Mapping> = Set::empty();

        proof {
            // end_va <= barrier_va.end <= MAX_USERSPACE_VADDR for user page tables.
            // barrier_va.end = locked_range().end which is bounded by the user VA space.
            // TODO: derive from cursor construction postcondition.
            assume(end_va <= MAX_USERSPACE_VADDR);

            assert((self.pt_cursor.inner.va + len) % PAGE_SIZE as int == 0) by (compute);
            // Initially nothing has been removed, so the difference is trivial.
            assert(adjusted_base.difference(removed) =~= adjusted_base);
        }

        #[verus_spec(
            invariant
                self.pt_cursor.inner.va % PAGE_SIZE == 0,
                end_va % PAGE_SIZE == 0,
                end_va <= MAX_USERSPACE_VADDR,
                self.pt_cursor.inner.invariants(*cursor_owner, *regions, *guards),
                end_va <= self.pt_cursor.inner.barrier_va.end,
                tlb_model.inv(),
                start_va <= cursor_owner@.cur_va,
                // Split-aware invariant: adjusted_base tracks accumulated splits,
                // removed tracks the explicitly removed set.
                cursor_owner@.mappings =~= adjusted_base.difference(removed),
                removed.subset_of(adjusted_base),
                num_unmapped as nat == removed.len(),
                removed.finite(),
                crate::specs::mm::page_table::mapping_set_lemmas::wf_mapping_set(adjusted_base),
                // Everything removed is in the [start, end) range.
                forall |m: Mapping| #[trigger] removed.contains(m) ==>
                    start_va <= m.va_range.start < end_va,
                // Nothing in [start_va, end_va) with start < cursor_va remains,
                // unless it is a sub-mapping of a boundary-straddling entry.
                forall |m: Mapping| #![auto] adjusted_base.contains(m) && !removed.contains(m)
                    && start_va <= m.va_range.start && m.va_range.start < end_va ==>
                    m.va_range.start >= cursor_owner@.cur_va
                    || exists |parent: Mapping| #[trigger] old(cursor_owner)@.mappings.contains(parent)
                        && parent.va_range.start < start_va
                        && parent.va_range.start <= m.va_range.start
                        && m.va_range.end <= parent.va_range.end
                        && m.pa_range.start == (parent.pa_range.start + (m.va_range.start - parent.va_range.start)) as Paddr
                        && m.property == parent.property,
                start_va == old(cursor_owner)@.cur_va,
                old(cursor_owner)@.inv(),
                // Locality: old mappings fully outside [start, end) survive in adjusted_base.
                // (Straddling mappings may be split — see refinement.)
                forall |m: Mapping| #[trigger] old(cursor_owner)@.mappings.contains(m)
                    && (m.va_range.end <= start_va || m.va_range.start >= end_va)
                    ==> #[trigger] adjusted_base.contains(m),
                // Refinement: every mapping in adjusted_base is either from the old view
                // or a sub-mapping of an old entry (from boundary splits).
                forall |m: Mapping| #[trigger] adjusted_base.contains(m) ==>
                    old(cursor_owner)@.mappings.contains(m)
                    || exists |parent: Mapping| #[trigger] old(cursor_owner)@.mappings.contains(parent)
                        && parent.va_range.start <= m.va_range.start
                        && m.va_range.end <= parent.va_range.end
                        && m.pa_range.start == (parent.pa_range.start + (m.va_range.start - parent.va_range.start)) as Paddr
                        && m.property == parent.property,
            invariant_except_break
                self.pt_cursor.inner.va <= end_va,
                self.pt_cursor.inner.va < end_va ==> cursor_owner.in_locked_range(),
            ensures
                self.pt_cursor.inner.va >= end_va,
            decreases end_va - self.pt_cursor.inner.va
        )]
        // Each iteration consumes one fragment from the page table (or breaks
        // when no mapping remains before `end_va`); the cursor's VA strictly
        // advances, which justifies the `decreases` measure.
        loop {
            // Snapshot the view at the top of the iteration; the proofs below
            // relate this pre-state to the post-`take_next` view.
            let ghost prev_va: Vaddr = cursor_owner@.cur_va;
            let ghost prev_mappings: Set<Mapping> = cursor_owner@.mappings;

            let ghost prev_view_inv: bool = cursor_owner@.inv();
            proof {
                cursor_owner.va.reflect_prop(self.pt_cursor.inner.va);
                cursor_owner.view_preserves_inv();
            }

            // SAFETY: It is safe to un-map memory in the userspace.
            let Some(frag) = (unsafe {
                #[verus_spec(with Tracked(cursor_owner), Tracked(regions), Tracked(guards))]
                self.pt_cursor.take_next(end_va - self.virt_addr())
            }) else {
                proof {
                    cursor_owner.va.reflect_prop(self.pt_cursor.inner.va);
                    // At break: take_next returned None, so no mappings in [prev_va, end_va).
                    // Any m with start >= prev_va leads to contradiction via the empty filter.
                    assert forall |m: Mapping|
                        #![auto] adjusted_base.contains(m) && !removed.contains(m)
                        && start_va <= m.va_range.start && m.va_range.start < end_va
                    implies m.va_range.start >= cursor_owner@.cur_va
                        || exists |parent: Mapping| #[trigger] old(cursor_owner)@.mappings.contains(parent)
                            && parent.va_range.start < start_va
                            && parent.va_range.start <= m.va_range.start
                            && m.va_range.end <= parent.va_range.end
                            && m.pa_range.start == (parent.pa_range.start + (m.va_range.start - parent.va_range.start)) as Paddr
                            && m.property == parent.property
                    by {
                        if m.va_range.start >= prev_va {
                            assert(prev_mappings.filter(
                                |m2: Mapping| prev_va <= m2.va_range.start < end_va,
                            ).contains(m));
                            assert(false);
                        }
                    };
                }
                break ;
            };

            let ghost old_adjusted = adjusted_base;
            let ghost old_removed = removed;

            proof {
                cursor_owner.va.reflect_prop(self.pt_cursor.inner.va);
            }

            let ghost frag_ghost = frag;

            match frag {
                PageTableFrag::Mapped { va, item, .. } => {
                    let frame = item.frame;
                    proof {
                        crate::mm::page_table::lemma_vaddr_range_bounds_spec_user();
                        // TODO: chain CursorView::inv bound
                        // (`m.va_range.end <= vaddr_range_bounds_spec<C>.1 + 1`) to
                        // the fits_usize precondition. Currently admitted.
                        admit();
                        crate::specs::mm::page_table::mapping_set_lemmas::lemma_mapping_set_cardinality_fits_usize(removed);
                    }
                    assert(num_unmapped < usize::MAX);
                    num_unmapped += 1;
                    #[verus_spec(with Tracked(tlb_model))]
                    self.flusher.issue_tlb_flush_with(TlbFlushOp::Address(va), frame.into());
                },
                // NOTE: the `va` and `len` bound here shadow the outer
                // `start_va`-relative values and the function's `len`
                // parameter; they describe the stray subtree's own range.
                PageTableFrag::StrayPageTable { pt, va, len, num_frames } => {
                    proof {
                        let ghost new_removed = old_removed.union(
                            prev_mappings.filter(|m2: Mapping| frag_ghost->StrayPageTable_va <= m2.va_range.start
                                < frag_ghost->StrayPageTable_va + frag_ghost->StrayPageTable_len));
                        assert(new_removed.subset_of(old_adjusted)) by {
                            assert forall|m: Mapping| new_removed.contains(m)
                            implies old_adjusted.contains(m) by {
                                if prev_mappings.contains(m) {
                                    // m ∈ prev_mappings = old_adjusted \ old_removed ⊆ old_adjusted.
                                }
                            };
                        };
                        vstd::set::axiom_set_union_finite(
                            old_removed,
                            prev_mappings.filter(|m2: Mapping| frag_ghost->StrayPageTable_va <= m2.va_range.start
                                < frag_ghost->StrayPageTable_va + frag_ghost->StrayPageTable_len));
                        crate::specs::mm::page_table::mapping_set_lemmas::lemma_wf_subset(
                            old_adjusted, new_removed);
                        crate::mm::page_table::lemma_vaddr_range_bounds_spec_user();
                        crate::specs::mm::page_table::mapping_set_lemmas::lemma_mapping_set_cardinality_fits_usize(
                            new_removed);
                        // |new_removed| = |old_removed| + |subtree| (disjoint).
                        assert(old_removed.disjoint(
                            prev_mappings.filter(|m2: Mapping| frag_ghost->StrayPageTable_va <= m2.va_range.start
                                < frag_ghost->StrayPageTable_va + frag_ghost->StrayPageTable_len))) by {
                            assert forall|m: Mapping| old_removed.contains(m) implies
                                !prev_mappings.filter(|m2: Mapping| frag_ghost->StrayPageTable_va <= m2.va_range.start
                                    < frag_ghost->StrayPageTable_va + frag_ghost->StrayPageTable_len).contains(m) by {
                                assert(!prev_mappings.contains(m));
                            };
                        };
                        vstd::set::axiom_set_intersect_finite::<Mapping>(
                            prev_mappings,
                            Set::new(|m2: Mapping| frag_ghost->StrayPageTable_va <= m2.va_range.start
                                < frag_ghost->StrayPageTable_va + frag_ghost->StrayPageTable_len));
                        vstd::set_lib::lemma_set_disjoint_lens(
                            old_removed,
                            prev_mappings.filter(|m2: Mapping| frag_ghost->StrayPageTable_va <= m2.va_range.start
                                < frag_ghost->StrayPageTable_va + frag_ghost->StrayPageTable_len));
                        assert(num_unmapped + num_frames < usize::MAX);
                    }
                    num_unmapped += num_frames;
                    proof {
                        // va + len <= end_va: from take_next VA bound (StrayPageTable_va + StrayPageTable_len <= old_va + len_arg = end_va).
                        // end_va <= MAX_USERSPACE_VADDR < KERNEL_VADDR_RANGE.end.
                        assert(MAX_USERSPACE_VADDR < KERNEL_VADDR_RANGE.end as usize)
                            by (compute_only);
                        assert(va + len <= KERNEL_VADDR_RANGE.end as usize);
                        crate::specs::mm::page_table::cursor::page_size_lemmas::lemma_va_plus_page_size_no_overflow(
                            va, len);
                    }
                    #[verus_spec(with Tracked(tlb_model))]
                    self.flusher.issue_tlb_flush_with(TlbFlushOp::Range(va..va + len), pt);
                },
            }

            // Re-establish all loop invariants for the updated ghost state.
            proof {
                // Ghost definitions for the two fragment cases.
                let ghost mm = match frag_ghost {
                    PageTableFrag::Mapped { va: fv, item: fi, .. } => CursorView::<
                        UserPtConfig,
                    >::item_into_mapping(fv, fi),
                    _ => arbitrary(),
                };
                let ghost sv = CursorView::<UserPtConfig> {
                    cur_va: match frag_ghost {
                        PageTableFrag::Mapped { va: fv, .. } => fv as Vaddr,
                        _ => 0,
                    },
                    mappings: prev_mappings,
                    phantom: PhantomData,
                };
                let ghost sm = sv.split_while_huge(mm.page_size).mappings;
                let ghost is_mapped = frag_ghost is Mapped;
                let ghost subtree = match frag_ghost {
                    PageTableFrag::StrayPageTable { va: fv, len: fl, .. } => prev_mappings.filter(
                        |m2: Mapping| fv <= m2.va_range.start < fv + fl,
                    ),
                    _ => Set::empty(),
                };

                // Update ghost tracking variables.
                match frag_ghost {
                    PageTableFrag::StrayPageTable { .. } => {
                        removed = old_removed.union(subtree);
                    },
                    PageTableFrag::Mapped { .. } => {
                        adjusted_base = sm.union(old_removed);
                        removed = old_removed.union(set![mm]);
                    },
                }

                // Mapped-case setup: establish split_while_huge properties once.
                if is_mapped {
                    assert(sv.cur_va < MAX_USERSPACE_VADDR);
                    assert(prev_mappings.disjoint(old_removed)) by {
                        assert forall|e: Mapping|
                            prev_mappings.contains(e) implies !old_removed.contains(e) by {};
                    };
                    sv.split_while_huge_disjoint(mm.page_size, old_removed);
                    sv.lemma_split_while_huge_preserves_inv(mm.page_size);
                }

                // Prove |removed| tracking (disjointness + cardinality).
                match frag_ghost {
                    PageTableFrag::StrayPageTable { .. } => {
                        assert(old_removed.disjoint(subtree)) by {
                            assert forall|e: Mapping|
                                old_removed.contains(e) implies !subtree.contains(e) by {};
                        };
                        vstd::set::axiom_set_intersect_finite::<Mapping>(
                            prev_mappings, Set::new(|m2: Mapping| subtree.contains(m2)));
                        vstd::set_lib::lemma_set_disjoint_lens(old_removed, subtree);
                        assert(removed =~= old_removed + subtree);
                    },
                    PageTableFrag::Mapped { .. } => {
                        assert(old_removed.disjoint(set![mm])) by {
                            assert forall |e: Mapping| #[trigger] old_removed.contains(e)
                            implies !set![mm].contains(e) by {};
                        };
                        vstd::set::axiom_set_insert_finite(Set::<Mapping>::empty(), mm);
                        vstd::set_lib::lemma_set_disjoint_lens(old_removed, set![mm]);
                        assert(removed =~= old_removed + set![mm]);
                        vstd::set_lib::lemma_set_empty_equivalency_len(Set::<Mapping>::empty());
                        assert(set![mm] =~= Set::<Mapping>::empty().insert(mm));
                        vstd::set::axiom_set_insert_len(Set::<Mapping>::empty(), mm);
                    },
                }

                // Maintain wf_mapping_set(adjusted_base) — only changes in Mapped case.
                if is_mapped {
                    vstd::set::axiom_set_union_finite(sm, old_removed);
                    crate::specs::mm::page_table::mapping_set_lemmas::lemma_wf_subset(
                        old_adjusted, old_removed);
                    assert forall|m: Mapping, n: Mapping|
                        #[trigger] sm.contains(m) && #[trigger] old_removed.contains(n) implies
                        m.va_range.end <= n.va_range.start || n.va_range.end <= m.va_range.start by {
                        sv.split_while_huge_refinement(mm.page_size, m);
                        assert(!prev_mappings.contains(n));
                        if prev_mappings.contains(m) {
                        } else {
                            let p = choose |p: Mapping| #[trigger] prev_mappings.contains(p)
                                && p.va_range.start <= m.va_range.start
                                && m.va_range.end <= p.va_range.end
                                && m.pa_range.start == (p.pa_range.start + (m.va_range.start - p.va_range.start)) as Paddr
                                && m.property == p.property;
                            assert(old_adjusted.contains(p));
                        }
                    };
                    crate::specs::mm::page_table::mapping_set_lemmas::lemma_wf_union(
                        sm, old_removed);
                }

                // Maintain mappings =~= adjusted_base \ removed.
                assert forall |e: Mapping| #[trigger] adjusted_base.difference(removed).contains(e)
                    <==> cursor_owner@.mappings.contains(e) by {};
                assert(cursor_owner@.mappings =~= adjusted_base.difference(removed));

                assert(removed.subset_of(adjusted_base)) by {
                    assert forall|e: Mapping| #[trigger]
                        removed.contains(e) implies adjusted_base.contains(e) by {};
                };

                // Maintain: not-yet-removed mappings in [start, end) are either
                // ahead of the cursor or sub-mappings of a boundary-straddling parent.
                assert forall |m: Mapping| #![auto] adjusted_base.contains(m) && !removed.contains(m)
                    && start_va <= m.va_range.start && m.va_range.start < end_va
                implies m.va_range.start >= cursor_owner@.cur_va
                    || exists |parent: Mapping| #[trigger] old(cursor_owner)@.mappings.contains(parent)
                        && parent.va_range.start < start_va
                        && parent.va_range.start <= m.va_range.start
                        && m.va_range.end <= parent.va_range.end
                        && m.pa_range.start == (parent.pa_range.start + (m.va_range.start - parent.va_range.start)) as Paddr
                        && m.property == parent.property
                by {
                    if m.va_range.start < cursor_owner@.cur_va {
                        if m.va_range.start >= prev_va {
                            // m was just processed — contradiction via empty filtered sets.
                            match frag_ghost {
                                PageTableFrag::StrayPageTable { va: frag_va, .. } => {
                                    if m.va_range.start >= frag_va {
                                        assert(cursor_owner@.mappings.filter(
                                            |m2: Mapping|
                                                frag_va <= m2.va_range.start
                                                    < self.pt_cursor.inner.va,
                                        ).contains(m));
                                    } else {
                                        assert(prev_mappings.filter(
                                            |m2: Mapping| prev_va <= m2.va_range.start < frag_va,
                                        ).contains(m));
                                    }
                                },
                                PageTableFrag::Mapped { va: frag_va, .. } => {
                                    if m.va_range.start >= (frag_va as usize) {
                                        assert(cursor_owner@.mappings.filter(
                                            |m2: Mapping|
                                                (frag_va as usize) <= m2.va_range.start
                                                    < self.pt_cursor.inner.va,
                                        ).contains(m));
                                    } else {
                                        assert(prev_mappings.filter(
                                            |m2: Mapping|
                                                prev_va <= m2.va_range.start < (frag_va as usize),
                                        ).contains(m));
                                    }
                                },
                            }
                            assert(false);
                        } else if is_mapped {
                            // m.start < prev_va, m ∈ sm \ old_removed.
                            assert(sm.contains(m));
                            sv.split_while_huge_refinement(mm.page_size, m);
                            if prev_mappings.contains(m) {
                                // m ∈ old_adjusted \ old_removed — previous invariant applies directly.
                            } else {
                                // m is a sub-mapping of some p ∈ prev_mappings.
                                let p = choose |p: Mapping| #[trigger] prev_mappings.contains(p)
                                    && p.va_range.start <= m.va_range.start
                                    && m.va_range.end <= p.va_range.end
                                    && m.pa_range.start == (p.pa_range.start + (m.va_range.start - p.va_range.start)) as Paddr
                                    && m.property == p.property;
                                assert(old_adjusted.contains(p) && !old_removed.contains(p));
                                if p.va_range.start < start_va {
                                    // p itself or its ancestor is the boundary parent.
                                    if !old(cursor_owner)@.mappings.contains(p) {
                                        let orig = choose |orig: Mapping|
                                            #[trigger] old(cursor_owner)@.mappings.contains(orig)
                                            && orig.va_range.start <= p.va_range.start
                                            && p.va_range.end <= orig.va_range.end
                                            && p.pa_range.start == (orig.pa_range.start + (p.va_range.start - orig.va_range.start)) as Paddr
                                            && p.property == orig.property;
                                        assert(orig.inv());
                                        assert(m.inv());
                                        crate::specs::mm::page_table::mapping_set_lemmas::lemma_sub_mapping_pa_compose(m, p, orig);
                                    }
                                } else if p.va_range.start >= end_va {
                                    assert(false); // p.start <= m.start < end_va, contradiction.
                                } else {
                                    // start_va <= p.start < end_va, p.start < prev_va.
                                    // Previous invariant gives boundary ancestor orig.
                                    let orig = choose |orig: Mapping|
                                        #[trigger] old(cursor_owner)@.mappings.contains(orig)
                                        && orig.va_range.start < start_va
                                        && orig.va_range.start <= p.va_range.start
                                        && p.va_range.end <= orig.va_range.end
                                        && p.pa_range.start == (orig.pa_range.start + (p.va_range.start - orig.va_range.start)) as Paddr
                                        && p.property == orig.property;
                                    assert(orig.inv());
                                    assert(m.inv());
                                    crate::specs::mm::page_table::mapping_set_lemmas::lemma_sub_mapping_pa_compose(m, p, orig);
                                }
                            }
                        }
                    }
                };

                // Maintain: old mappings outside [start, end) survive in adjusted_base.
                if is_mapped {
                    assert forall |m: Mapping| old(cursor_owner)@.mappings.contains(m)
                        && (m.va_range.end <= start_va || m.va_range.start >= end_va)
                    implies #[trigger] adjusted_base.contains(m) by {
                        if m.va_range.end <= start_va { assert(m.inv()); }
                        sv.split_while_huge_locality(mm.page_size, m);
                    };

                    // Maintain: refinement — every mapping in adjusted_base comes from
                    // old mappings or is a sub-mapping of one.
                    assert forall |m: Mapping| #[trigger] adjusted_base.contains(m) implies
                        old(cursor_owner)@.mappings.contains(m)
                        || exists |parent: Mapping| #[trigger] old(cursor_owner)@.mappings.contains(parent)
                            && parent.va_range.start <= m.va_range.start
                            && m.va_range.end <= parent.va_range.end
                            && m.pa_range.start == (parent.pa_range.start + (m.va_range.start - parent.va_range.start)) as Paddr
                            && m.property == parent.property
                    by {
                        if !old_removed.contains(m) {
                            sv.split_while_huge_refinement(mm.page_size, m);
                            if !prev_mappings.contains(m) {
                                let p = choose |p: Mapping| #[trigger] prev_mappings.contains(p)
                                    && p.va_range.start <= m.va_range.start
                                    && m.va_range.end <= p.va_range.end
                                    && m.pa_range.start == (p.pa_range.start + (m.va_range.start - p.va_range.start)) as Paddr
                                    && m.property == p.property;
                                assert(old_adjusted.contains(p));
                                if !old(cursor_owner)@.mappings.contains(p) {
                                    let orig = choose |orig: Mapping|
                                        #[trigger] old(cursor_owner)@.mappings.contains(orig)
                                        && orig.va_range.start <= p.va_range.start
                                        && p.va_range.end <= orig.va_range.end
                                        && p.pa_range.start == (orig.pa_range.start + (
                                            p.va_range.start - orig.va_range.start)) as Paddr
                                        && p.property == orig.property;
                                    assert(orig.inv());
                                    assert(m.inv());
                                    crate::specs::mm::page_table::mapping_set_lemmas::lemma_sub_mapping_pa_compose(m, p, orig);
                                }
                            }
                        }
                    };
                }
            }
        }
        proof {
            cursor_owner.va.reflect_prop(self.pt_cursor.inner.va);

            let old_view = old(self).pt_cursor.inner.model(*old(cursor_owner));
            let new_view = self.pt_cursor.inner.model(*cursor_owner);

            // Bridge from loop invariant to unmap_spec.
            let start = old_view.cur_va;
            let end = (old_view.cur_va + len) as Vaddr;

            assert(new_view.cur_va >= end);

            assert forall |m: Mapping| #![auto] old_view.mappings.contains(m)
                && (m.va_range.end <= start || m.va_range.start >= end)
            implies new_view.mappings.contains(m) by {
                assert(adjusted_base.contains(m));
                if m.va_range.end <= start {
                    assert(m.inv());
                }
            };

            assert forall |m: Mapping| new_view.mappings.contains(m)
                && start <= m.va_range.start < end
            implies exists |parent: Mapping| #[trigger] old_view.mappings.contains(parent)
                && parent.va_range.start < start
                && parent.va_range.start <= m.va_range.start
                && m.va_range.end <= parent.va_range.end
                && m.pa_range.start == (parent.pa_range.start + (m.va_range.start - parent.va_range.start)) as Paddr
                && m.property == parent.property
            by { };

            assert forall |m: Mapping| new_view.mappings.contains(m)
            implies old_view.mappings.contains(m)
                || exists |parent: Mapping| #[trigger] old_view.mappings.contains(parent)
                    && parent.va_range.start <= m.va_range.start
                    && m.va_range.end <= parent.va_range.end
                    && m.pa_range.start == (parent.pa_range.start + (m.va_range.start - parent.va_range.start)) as Paddr
                    && m.property == parent.property
            by { };
        }

        // Dispatch all flushes issued during the loop; this must happen before
        // returning, otherwise stale TLB entries would break memory safety.
        #[verus_spec(with Tracked(tlb_model))]
        self.flusher.dispatch_tlb_flush();

        num_unmapped
    }
1360
1361 /// Applies the operation to the next slot of mapping within the range.
1362 ///
1363 /// The range to be found in is the current virtual address with the
1364 /// provided length.
1365 ///
1366 /// The function stops and yields the actually protected range if it has
1367 /// actually protected a page, no matter if the following pages are also
1368 /// required to be protected.
1369 ///
1370 /// It also makes the cursor moves forward to the next page after the
1371 /// protected one. If no mapped pages exist in the following range, the
1372 /// cursor will stop at the end of the range and return [`None`].
1373 ///
1374 /// Note that it will **NOT** flush the TLB after the operation. Please
1375 /// make the decision yourself on when and how to flush the TLB using
1376 /// [`Self::flusher`].
1377 ///
1378 /// # Verified Properties
1379 /// ## Preconditions
1380 /// - **Safety Invariants**: The page table cursor safety invariants
1381 /// ([`invariants`](crate::mm::page_table::Cursor::invariants)) and the
1382 /// meta-region invariants must hold before the call.
1383 /// - The cursor must be within the locked range and below the guard level.
1384 /// - The current entry must be a mapped frame (not absent or a page table node).
1385 /// - **Liveness**: The length must be page-aligned and within the remaining cursor range.
1386 /// ## Postconditions
1387 /// - **Correctness**: If the metadata region was well-formed before the call, it will be
1388 /// well-formed after.
1389 /// ## Panics
1390 /// Panics if the length is longer than the remaining range of the cursor.
1391 /// ## Safety
1392 /// - From a soundness perspective changing a userspace page's `prop` field is safe.
    #[verus_spec(r =>
        with Tracked(owner): Tracked<&mut CursorOwner<'a, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'a, UserPtConfig>>,
        requires
            old(self).pt_cursor.inner.invariants(*old(owner), *old(regions), *old(guards)),
            forall |p: PageProperty| op.requires((p,)),
        ensures
            !old(self).pt_cursor.inner.find_next_panic_condition(len),
            final(self).pt_cursor.inner.invariants(*final(owner), *final(regions), *final(guards)),
            final(self).pt_cursor.inner.barrier_va == old(self).pt_cursor.inner.barrier_va,
    )]
    pub fn protect_next(
        &mut self,
        len: usize,
        op: impl FnOnce(PageProperty) -> PageProperty,
    ) -> Option<Range<Vaddr>> {
        // Thin wrapper: forwards to the inner page table cursor's
        // `protect_next`, which applies `op` to the next mapped slot's
        // property within `len` bytes. Note: no TLB flush is issued here —
        // the caller is responsible (see the doc comment above).
        // SAFETY: It is safe to protect memory in the userspace.
        unsafe {
            #[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
            self.pt_cursor.protect_next(len, op)
        }
    }
1416}
1417
1418/*cpu_local_cell! {
1419 /// The `Arc` pointer to the activated VM space on this CPU. If the pointer
1420 /// is NULL, it means that the activated page table is merely the kernel
1421 /// page table.
1422 // TODO: If we are enabling ASID, we need to maintain the TLB state of each
1423 // CPU, rather than merely the activated `VmSpace`. When ASID is enabled,
1424 // the non-active `VmSpace`s can still have their TLB entries in the CPU!
1425 static ACTIVATED_VM_SPACE: *const VmSpace = core::ptr::null();
1426}*/
1427
1428/*#[cfg(ktest)]
1429pub(super) fn get_activated_vm_space() -> *const VmSpace {
1430 ACTIVATED_VM_SPACE.load()
1431}*/
1432
/// The configuration for user page tables.
///
/// A zero-sized marker type: all behavior lives in its
/// `PageTableConfig` implementation below (top-level index range,
/// entry/constants types, and item raw conversions).
#[derive(Clone, Debug)]
pub struct UserPtConfig {}
1436
/// The item that can be mapped into the [`VmSpace`].
pub struct MappedItem {
    /// The untyped frame backing this mapping.
    pub frame: UFrame,
    /// The page property (permissions/attributes) the frame is mapped with.
    pub prop: PageProperty,
}
1442
#[verus_verify]
impl RCClone for MappedItem {
    /// Cloning a `MappedItem` is allowed exactly when its frame may be
    /// cloned (i.e., the frame's reference count can be bumped under `perm`).
    open spec fn clone_requires(self, perm: MetaRegionOwners) -> bool {
        self.frame.clone_requires(perm)
    }

    /// The clone's guarantee is inherited from the frame's clone guarantee;
    /// `prop` is `Copy`-like plain data and needs no bookkeeping.
    open spec fn clone_ensures(self, old_perm: MetaRegionOwners, new_perm: MetaRegionOwners, res: Self) -> bool {
        self.frame.clone_ensures(old_perm, new_perm, res.frame)
    }

    fn clone(&self, Tracked(perm): Tracked<&mut MetaRegionOwners>) -> (res: Self)
    {
        // Only the frame needs the tracked permission (refcount update);
        // the property is copied by value.
        let frame = self.frame.clone(Tracked(perm));
        Self {
            frame,
            prop: self.prop,
        }
    }
}
1462
// SAFETY: `item_into_raw` and `item_from_raw` form a matched pair:
// `item_into_raw` relinquishes ownership of the frame via `into_raw`, and
// `item_from_raw` reconstructs that ownership from the returned physical
// address with `UFrame::from_raw`.
unsafe impl PageTableConfig for UserPtConfig {
    // Spec-level mirror of `TOP_LEVEL_INDEX_RANGE()`; the two bodies must
    // stay literally in sync.
    open spec fn TOP_LEVEL_INDEX_RANGE_spec() -> Range<usize> {
        0..256
    }

    // Spec-level mirror of `TOP_LEVEL_CAN_UNMAP()`.
    open spec fn TOP_LEVEL_CAN_UNMAP_spec() -> (b: bool) {
        true
    }

    // User mappings are confined to top-level page-table indices 0..256.
    fn TOP_LEVEL_INDEX_RANGE() -> Range<usize> {
        0..256
    }

    // Top-level entries of a user page table may be unmapped.
    fn TOP_LEVEL_CAN_UNMAP() -> (b: bool) {
        true
    }

    type E = PageTableEntry;

    type C = PagingConsts;

    type Item = MappedItem;

    // NOTE(review): this spec pins the paging level to `1`, while the
    // trusted exec body below returns `frame.map_level()`. Since the exec
    // function is `external_body`, the verifier takes this spec on faith —
    // confirm that `UFrame::map_level()` is always 1 for user frames.
    open spec fn item_into_raw_spec(item: Self::Item) -> (Paddr, PagingLevel, PageProperty) {
        (item.frame.paddr(), 1, item.prop)
    }

    // Trusted (unverified) implementation: destructures the item and leaks
    // the frame's ownership into a raw physical address.
    #[verifier::external_body]
    fn item_into_raw(item: Self::Item) -> (Paddr, PagingLevel, PageProperty) {
        let MappedItem { frame, prop } = item;
        let level = frame.map_level();
        let paddr = frame.into_raw();
        (paddr, level, prop)
    }

    // The reconstruction is left uninterpreted at the spec level; its
    // relationship to `item_into_raw_spec` is established by the
    // `item_roundtrip` axiom below.
    uninterp spec fn item_from_raw_spec(
        paddr: Paddr,
        level: PagingLevel,
        prop: PageProperty,
    ) -> Self::Item;

    // Trusted (unverified) implementation: re-takes ownership of the frame
    // previously leaked by `item_into_raw`.
    // NOTE(review): `level` is ignored here — presumably because user frames
    // are always mapped at level 1 (see `item_into_raw_spec`); confirm.
    #[verifier::external_body]
    fn item_from_raw(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self::Item {
        let frame = unsafe { UFrame::from_raw(paddr) };
        MappedItem { frame, prop }
    }

    // Axiomatized trait obligations: assumed without proof.
    axiom fn axiom_nr_subpage_per_huge_eq_nr_entries();

    axiom fn item_roundtrip(item: Self::Item, paddr: Paddr, level: PagingLevel, prop: PageProperty);

    // TODO(verification): proof obligation currently `admit()`ed — the
    // clone postcondition for concrete items remains unproven.
    proof fn clone_ensures_concrete(
        item: Self::Item,
        pa: Paddr,
        old_regions: MetaRegionOwners,
        new_regions: MetaRegionOwners,
        res: Self::Item,
    ) {
        admit();
    }

    // TODO(verification): proof obligation currently `admit()`ed — the
    // clone precondition for concrete items remains unproven.
    proof fn clone_requires_concrete(
        item: Self::Item,
        pa: Paddr,
        level: PagingLevel,
        prop: PageProperty,
        regions: MetaRegionOwners,
    ) {
        admit();
    }
}
1535
1536} // verus!