ostd/mm/vm_space.rs
1// SPDX-License-Identifier: MPL-2.0
2//! Virtual memory space management.
3//!
4//! The [`VmSpace`] struct is provided to manage the virtual memory space of a
5//! user. Cursors are used to traverse and modify over the virtual memory space
6//! concurrently. The VM space cursor [`self::Cursor`] is just a wrapper over
7//! the page table cursor, providing efficient, powerful concurrent accesses
8//! to the page table.
9use alloc::vec::Vec;
10use vstd::pervasive::{arbitrary, proof_from_false};
11use vstd::prelude::*;
12use vstd::simple_pptr::PointsTo;
13use vstd::atomic::PermissionU64;
14
15use crate::specs::mm::virt_mem_newer::{MemView, VirtPtr};
16
17use crate::error::Error;
18use crate::mm::frame::untyped::UFrame;
19use crate::mm::io::VmIoMemView;
20use crate::mm::page_table::*;
21use crate::mm::page_table::{EntryOwner, PageTableFrag, PageTableGuard};
22use crate::mm::frame::MetaSlot;
23use crate::specs::arch::*;
24use crate::specs::mm::frame::mapping::meta_to_frame;
25use crate::specs::mm::frame::meta_region_owners::MetaRegionOwners;
26use crate::specs::mm::frame::meta_owners::{MetaPerm, MetaSlotStorage, MetadataInnerPerms};
27use crate::specs::mm::page_table::cursor::owners::CursorOwner;
28use crate::specs::mm::page_table::*;
29use crate::specs::mm::tlb::TlbModel;
30use crate::specs::task::InAtomicMode;
31use core::marker::PhantomData;
32use core::{ops::Range, sync::atomic::Ordering};
33use vstd_extra::ghost_tree::*;
34
35use crate::mm::tlb::*;
36use crate::specs::mm::cpu::AtomicCpuSet;
37
38use crate::{
39 mm::{
40 io::{VmIoOwner, VmReader, VmWriter},
41 page_prop::PageProperty,
42 Paddr, PagingConstsTrait, PagingLevel, Vaddr, MAX_USERSPACE_VADDR,
43 },
44 prelude::*,
45};
46
47use alloc::sync::Arc;
48
49#[path = "../../specs/mm/vm_space.rs"]
50pub mod vm_space_specs;
51use vm_space_specs::*;
52
53verus! {
54
55/// A virtual address space for user-mode tasks, enabling safe manipulation of user-space memory.
56///
57/// The [`VmSpace`] type provides memory isolation guarantees between user-space and
58/// kernel-space. For example, given an arbitrary user-space pointer, one can read and
59/// write the memory location referred to by the user-space pointer without the risk of
60/// breaking the memory safety of the kernel space.
61///
62/// # Task Association Semantics
63///
64/// As far as OSTD is concerned, a [`VmSpace`] is not necessarily associated with a task. Once a
65/// [`VmSpace`] is activated (see [`VmSpace::activate`]), it remains activated until another
66/// [`VmSpace`] is activated **possibly by another task running on the same CPU**.
67///
68/// This means that it's up to the kernel to ensure that a task's [`VmSpace`] is always activated
69/// while the task is running. This can be done by using the injected post schedule handler
70/// (see [`inject_post_schedule_handler`]) to always activate the correct [`VmSpace`] after each
71/// context switch.
72///
73/// If the kernel otherwise decides not to ensure that the running task's [`VmSpace`] is always
74/// activated, the kernel must deal with race conditions when calling methods that require the
75/// [`VmSpace`] to be activated, e.g., [`UserMode::execute`], [`VmReader`] and [`VmWriter`].
76/// Otherwise, the behavior is unspecified, though it's guaranteed _not_ to compromise the kernel's
77/// memory safety.
78///
79/// # Memory Backing
80///
81/// A newly-created [`VmSpace`] is not backed by any physical memory pages. To
82/// provide memory pages for a [`VmSpace`], one can allocate and map physical
83/// memory ([`UFrame`]s) to the [`VmSpace`] using the cursor.
84///
85/// A [`VmSpace`] can also attach a page fault handler, which will be invoked to
86/// handle page faults generated from user space.
87///
88/// [`inject_post_schedule_handler`]: crate::task::inject_post_schedule_handler
89/// [`UserMode::execute`]: crate::user::UserMode::execute
/// [`VmWriter`]: crate::mm::io::VmWriter
/// [`VmReader`]: crate::mm::io::VmReader
///
/// # Verification Design
93///
94/// A [`VmSpace`] has a corresponding [`VmSpaceOwner`] object that is used to track its state,
95/// and against which its invariants are stated. The [`VmSpaceOwner`] catalogues the readers and writers
96/// that are associated with the [`VmSpace`], and the [`MemView`] which encodes the active page table and
97/// the subset of the TLB that covers the same virtual address space.
98/// All proofs about the correctness of the readers and writers are founded on the well-formedness of the [`MemView`]:
99///
100/// ```rust
101/// open spec fn mem_view_wf(self) -> bool {
102/// &&& self.mem_view is Some <==> self.mv_range@ is Some
103/// // This requires that TotalMapping (mvv) = mv ∪ writer mappings ∪ reader mappings
104/// &&& self.mem_view matches Some(remaining_view)
105/// ==> self.mv_range@ matches Some(total_view)
106/// ==> {
107/// &&& remaining_view.mappings_are_disjoint()
108/// &&& remaining_view.mappings.finite()
109/// &&& total_view.mappings_are_disjoint()
110/// &&& total_view.mappings.finite()
111/// // ======================
112/// // Remaining Consistency
113/// // ======================
114/// &&& remaining_view.mappings.subset_of(total_view.mappings)
115/// &&& remaining_view.memory.dom().subset_of(
116/// total_view.memory.dom(),
117/// )
118/// // =====================
119/// // Total View Consistency
120/// // =====================
121/// &&& forall|va: usize|
122/// remaining_view.addr_transl(va) == total_view.addr_transl(
123/// va,
124/// )
125/// // =====================
126/// // Writer correctness
127/// // =====================
128/// &&& forall|i: int|
129/// 0 <= i < self.writers.len() as int ==> {
130/// &&& self.writers[i].inv()
131/// }
132/// }
133/// }
134/// }
135/// ```
pub struct VmSpace<'a> {
    /// The user-space page table owned by this VM space.
    pub pt: PageTable<UserPtConfig>,
    /// The set of CPUs on which this VM space may be activated.
    // NOTE(review): presumably kept as a conservative superset of the CPUs
    // that actually have this space activated (cf. the `flusher` field of
    // `CursorMut`) — confirm.
    pub cpus: AtomicCpuSet,
    /// Ties the `VmSpace` to the lifetime `'a` without storing any data.
    pub _marker: PhantomData<&'a ()>,
}
141
/// File-local shorthand: a `Result` whose error type is the crate's [`Error`].
type Result<A> = core::result::Result<A, Error>;
143
#[verus_verify]
impl<'a> VmSpace<'a> {
    /// A spec function to create a new [`VmSpace`] instance.
    ///
    /// The reason why this function is marked as `uninterp` is that the implementation details
    /// of the [`VmSpace`] struct are not important for the verification of its clients.
    pub uninterp spec fn new_spec() -> Self;

    /// Checks the preconditions for creating a reader or writer for the given virtual address range.
    ///
    /// Essentially, this requires that
    ///
    /// - the invariants of the [`VmSpace`] and [`VmSpaceOwner`] hold
    ///   (see [`VmSpaceOwner::inv_with`] and [`VmSpaceOwner::inv`]);
    /// - the [`VmSpaceOwner`] is active (via some threads or activation function);
    /// - the [`VmSpaceOwner`] can create a reader or writer for the given virtual address range
    ///   (see [`VmSpaceOwner::can_create_reader`] and [`VmSpaceOwner::can_create_writer`]);
    /// - the virtual address range is valid (non-zero, non-empty, and within user-space limits);
    /// - the currently active page table is the one owned by the [`VmSpace`] (note that this is
    ///   an `uninterp` spec function as this is non-trackable during verification).
    pub open spec fn reader_requires(
        &self,
        vm_owner: VmSpaceOwner<'a>,
        vaddr: Vaddr,
        len: usize,
    ) -> bool {
        &&& vm_owner.inv()
        &&& vm_owner.active
        &&& vm_owner.can_create_reader(vaddr, len)
        &&& vaddr != 0 && len > 0 && vaddr + len <= MAX_USERSPACE_VADDR
        &&& current_page_table_paddr_spec() == self.pt.root_paddr_spec()
    }

    /// Checks the preconditions for creating a writer for the given virtual address range.
    ///
    /// Most of the pre-conditions are the same as those for creating a reader (see
    /// [`Self::reader_requires`]), except that the caller must also have permission to
    /// create a writer for the given virtual address range.
    pub open spec fn writer_requires(
        &self,
        vm_owner: VmSpaceOwner<'a>,
        vaddr: Vaddr,
        len: usize,
    ) -> bool {
        &&& vm_owner.inv()
        &&& vm_owner.active
        &&& vm_owner.can_create_writer(vaddr, len)
        &&& vaddr != 0 && len > 0 && vaddr + len <= MAX_USERSPACE_VADDR
        &&& current_page_table_paddr_spec() == self.pt.root_paddr_spec()
    }

    /// The guarantees of the created reader or writer, assuming the preconditions are satisfied.
    ///
    /// Essentially, this ensures that
    ///
    /// - the invariants of the new [`VmSpace`] and the reader or writer hold;
    /// - the reader or writer is associated with a [`VmIoOwner`] that is well-formed with respect to
    ///   the reader or writer;
    /// - the reader or writer has no memory view, as the memory view will be taken from the
    ///   [`VmSpaceOwner`] when the reader or writer is activated.
    ///
    /// # Special Note
    ///
    /// The newly created instance of [`VmReader`] and its associated [`VmIoOwner`] are not yet
    /// activated, so the guarantees about the memory view only require that the memory view is
    /// [`None`]. The guarantees about the memory view will be provided by the activation function
    /// (see [`VmSpaceOwner::activate_reader`] and [`VmSpaceOwner::activate_writer`]).
    ///
    /// We avoid mixing the creation, usage and deletion into one giant function as this would
    /// create some unnecessary life-cycle management complexities and not really help with the
    /// verification itself.
    pub open spec fn reader_ensures(
        &self,
        vm_owner_old: VmSpaceOwner<'_>,
        vm_owner_new: VmSpaceOwner<'_>,
        vaddr: Vaddr,
        len: usize,
        r: Result<VmReader<'_>>,
        r_owner: Option<VmIoOwner<'_>>,
    ) -> bool {
        &&& vm_owner_new.inv()
        // Creating a reader registers/drops nothing yet: both catalogues are unchanged.
        &&& vm_owner_new.readers == vm_owner_old.readers
        &&& vm_owner_new.writers == vm_owner_old.writers
        &&& r matches Ok(reader) && r_owner matches Some(owner) ==> {
            &&& reader.inv()
            &&& owner.inv_with_reader(reader)
            // Not yet activated: the memory view is only taken later, on activation.
            &&& owner.mem_view is None
        }
    }

    /// The guarantees of the created writer, assuming the preconditions are satisfied.
    ///
    /// Most of the guarantees are the same as those for creating a reader (see
    /// [`Self::reader_ensures`]), except that the writer is associated with a [`VmIoOwner`]
    /// that is well-formed with respect to the writer.
    pub open spec fn writer_ensures(
        &self,
        vm_owner_old: VmSpaceOwner<'a>,
        vm_owner_new: VmSpaceOwner<'a>,
        vaddr: Vaddr,
        len: usize,
        r: Result<VmWriter<'a>>,
        r_owner: Option<VmIoOwner<'a>>,
    ) -> bool {
        &&& vm_owner_new.inv()
        &&& vm_owner_new.readers == vm_owner_old.readers
        &&& vm_owner_new.writers == vm_owner_old.writers
        &&& r matches Ok(writer) && r_owner matches Some(owner) ==> {
            &&& writer.inv()
            &&& owner.inv_with_writer(writer)
            &&& owner.mem_view is None
        }
    }

    /// Creates a new VM address space.
    ///
    /// # Verification Design
    ///
    /// This function is marked as `external_body` for now as the current design does not entail
    /// the concrete implementation details of the underlying data structure of the [`VmSpace`].
    ///
    /// ## Preconditions
    /// None
    ///
    /// ## Postconditions
    /// - The returned [`VmSpace`] instance satisfies the invariants of [`VmSpace`]
    /// - The returned [`VmSpace`] instance is equal to the one created by the [`Self::new_spec`]
    ///   function, which is an `uninterp` function that can be used in specifications.
    #[inline]
    #[verifier::external_body]
    #[verifier::when_used_as_spec(new_spec)]
    #[verus_spec(r =>
        ensures
            r == Self::new_spec(),
    )]
    pub fn new() -> Self {
        unimplemented!()
    }

    /// Gets an immutable cursor in the virtual address range.
    ///
    /// The cursor behaves like a lock guard, exclusively owning a sub-tree of
    /// the page table, preventing others from creating a cursor in it. So be
    /// sure to drop the cursor as soon as possible.
    ///
    /// The creation of the cursor may block if another cursor having an
    /// overlapping range is alive.
    #[verifier::external_body]
    pub fn cursor<G: InAtomicMode>(&'a self, guard: &'a G, va: &Range<Vaddr>) -> Result<
        Cursor<'a, G>,
    > {
        Ok(self.pt.cursor(guard, va).map(|pt_cursor| Cursor(pt_cursor.0))?)
    }

    /// Gets a mutable cursor in the virtual address range.
    ///
    /// The same as [`Self::cursor`], the cursor behaves like a lock guard,
    /// exclusively owning a sub-tree of the page table, preventing others
    /// from creating a cursor in it. So be sure to drop the cursor as soon as
    /// possible.
    ///
    /// The creation of the cursor may block if another cursor having an
    /// overlapping range is alive. A modification made through this cursor
    /// may also block, or be overridden by the modification of another cursor.
    #[verifier::external_body]
    pub fn cursor_mut<G: InAtomicMode>(&'a self, guard: &'a G, va: &Range<Vaddr>) -> Result<CursorMut<'a, G>> {
        Ok(
            self.pt.cursor_mut(guard, va).map(
                |pt_cursor|
                    CursorMut {
                        pt_cursor: pt_cursor.0,
                        // The flusher is seeded with this space's CPU set.
                        flusher: TlbFlusher::new(
                            &self.cpus /*, disable_preempt()*/
                            ,
                        ),
                    },
            )?,
        )
    }

    /// Creates a reader to read data from the user space of the current task.
    ///
    /// Returns [`Err`] if this [`VmSpace`] does not belong to the user space of the current task
    /// or the `vaddr` and `len` do not represent a user space memory range.
    ///
    /// Users must ensure that no other page table is activated in the current task during the
    /// lifetime of the created [`VmReader`]. This guarantees that the [`VmReader`] can operate
    /// correctly.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The [`VmSpace`] invariants must hold with respect to the [`VmSpaceOwner`], which must be active.
    /// - The range `vaddr..vaddr + len` must represent a user space memory range.
    /// - The [`VmSpaceOwner`] must not have any active writers overlapping with the range `vaddr..vaddr + len`.
    /// ## Postconditions
    /// - An inactive [`VmReader`] will be created with the range `vaddr..vaddr + len`.
    /// ## Safety
    /// - The function preserves all memory invariants.
    /// - By requiring that the [`VmSpaceOwner`] must not have any active writers overlapping with the target range,
    ///   it prevents data races between the reader and any writers.
    #[inline]
    #[verus_spec(r =>
        with
            Tracked(owner): Tracked<&'a mut VmSpaceOwner<'a>>,
            -> vm_reader_owner: Tracked<Option<VmIoOwner<'a>>>,
        requires
            self.reader_requires(*old(owner), vaddr, len),
        ensures
            self.reader_ensures(*old(owner), *owner, vaddr, len, r, vm_reader_owner@),
    )]
    pub fn reader(&self, vaddr: Vaddr, len: usize) -> Result<VmReader<'a>> {
        let vptr = VirtPtr::from_vaddr(vaddr, len);
        // Allocate a fresh ghost identifier for the new reader's owner.
        let ghost id = owner.new_vm_io_id();
        proof_decl! {
            let tracked mut vm_reader_owner;
        }
        // SAFETY: The memory range is in user space, as checked above.
        let reader = unsafe {
            proof_with!(Ghost(id) => Tracked(vm_reader_owner));
            VmReader::from_user_space(vptr)
        };

        proof_with!(|= Tracked(Some(vm_reader_owner)));
        Ok(reader)
    }

    /// Creates a writer to write data to the user space of the current task.
    ///
    /// Returns [`Err`] if this [`VmSpace`] does not belong to the user space of the current task
    /// or the `vaddr` and `len` do not represent a user space memory range.
    ///
    /// Users must ensure that no other page table is activated in the current task during the
    /// lifetime of the created [`VmWriter`]. This guarantees that the [`VmWriter`] can operate correctly.
    ///
    /// NOTE(review): unlike [`Self::reader`], this method takes `self` by value and thus
    /// consumes the `VmSpace` — confirm whether this asymmetry is intentional.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The [`VmSpace`] invariants must hold with respect to the [`VmSpaceOwner`], which must be active.
    /// - The range `vaddr..vaddr + len` must represent a user space memory range.
    /// - The [`VmSpaceOwner`] must not have any active readers or writers overlapping with the range `vaddr..vaddr + len`.
    /// ## Postconditions
    /// - An inactive [`VmWriter`] will be created with the range `vaddr..vaddr + len`.
    /// ## Safety
    /// - The function preserves all memory invariants.
    /// - By requiring that the [`VmSpaceOwner`] must not have any active readers or writers overlapping with the target range,
    ///   it prevents data races.
    #[inline]
    #[verus_spec(r =>
        with
            Tracked(owner): Tracked<&mut VmSpaceOwner<'a>>,
            -> new_owner: Tracked<Option<VmIoOwner<'a>>>,
        requires
            self.writer_requires(*old(owner), vaddr, len),
        ensures
            self.writer_ensures(*old(owner), *owner, vaddr, len, r, new_owner@),
    )]
    pub fn writer(self, vaddr: Vaddr, len: usize) -> Result<VmWriter<'a>> {
        let vptr = VirtPtr::from_vaddr(vaddr, len);
        // Allocate a fresh ghost identifier for the new writer's owner.
        let ghost id = owner.new_vm_io_id();
        proof_decl! {
            let tracked mut vm_writer_owner;
        }

        // `VmWriter` is neither `Sync` nor `Send`, so it will not live longer than the current
        // task. This ensures that the correct page table is activated during the usage period of
        // the `VmWriter`.
        //
        // SAFETY: The memory range is in user space, as checked above.
        let writer = unsafe {
            proof_with!(Ghost(id) => Tracked(vm_writer_owner));
            VmWriter::from_user_space(vptr)
        };

        proof_with!(|= Tracked(Some(vm_writer_owner)));
        Ok(writer)
    }
}
415
/// The cursor for querying over the VM space without modifying it.
///
/// It exclusively owns a sub-tree of the page table, preventing others from
/// reading or modifying the same sub-tree. Two read-only cursors can not be
/// created from the same virtual address range either.
// Thin wrapper around the read-only page-table cursor for `UserPtConfig`.
pub struct Cursor<'a, A: InAtomicMode>(pub crate::mm::page_table::Cursor<'a, UserPtConfig, A>);
422
#[verus_verify]
impl<'rcu, A: InAtomicMode> Cursor<'rcu, A> {
    /// Whether the cursor's current virtual address lies inside its barrier
    /// (locked) range — the condition under which a query can succeed.
    pub open spec fn query_success_requires(self) -> bool {
        self.0.barrier_va.start <= self.0.va < self.0.barrier_va.end
    }

    /// Relates a successful query result (`range`, `item`) to the abstract
    /// cursor `view`: if a mapping is present, an item is returned and it
    /// corresponds (via `query_item_spec`) to the returned range; otherwise
    /// no item is returned and the range starts at the cursor's current address.
    pub open spec fn query_success_ensures(
        self,
        view: CursorView<UserPtConfig>,
        range: Range<Vaddr>,
        item: Option<MappedItem>,
    ) -> bool {
        if view.present() {
            &&& item is Some
            &&& view.query_item_spec(item.unwrap()) == Some(range)
        } else {
            &&& range.start == self.0.va
            &&& item is None
        }
    }

    /// Queries the mapping at the current virtual address.
    ///
    /// If the cursor is pointing to a valid virtual address that is locked,
    /// it will return the virtual address range and the mapped item.
    ///
    /// ## Preconditions
    /// - All system invariants must hold
    /// - **Liveness**: The function will return an error if the cursor is not within the locked range
    /// ## Postconditions
    /// - If there is a mapped item at the current virtual address ([`query_some_condition`]),
    ///   it is returned along with the virtual address range that it maps ([`query_success_ensures`]).
    /// - The mapping that is returned corresponds to the abstract mapping given by [`query_item_spec`](CursorView::query_item_spec).
    /// - If there is no mapped item at the current virtual address ([`query_none_condition`]),
    ///   it returns [`None`], and the virtual address range of the cursor's current position.
    /// ## Safety
    /// - This function preserves all memory invariants.
    /// - The locking mechanism prevents data races.
    #[verus_spec(r =>
        with Tracked(owner): Tracked<&mut CursorOwner<'rcu, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'rcu, UserPtConfig>>
        requires
            old(self).0.invariants(*old(owner), *old(regions), *old(guards)),
            old(owner).in_locked_range(),
        ensures
            self.0.invariants(*owner, *regions, *guards),
            self.0.query_some_condition(*owner) ==> {
                &&& r is Ok
                &&& self.0.query_some_ensures(*owner, r.unwrap())
            },
            !self.0.query_some_condition(*owner) ==> {
                &&& r is Ok
                &&& self.0.query_none_ensures(*owner, r.unwrap())
            },
    )]
    pub fn query(&mut self) -> Result<(Range<Vaddr>, Option<MappedItem>)> {
        Ok(
            // Delegate to the page-table cursor, threading the tracked state through.
            #[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
            self.0.query()?,
        )
    }

    /// Moves the cursor forward to the next mapped virtual address.
    ///
    /// If there is a mapped virtual address following the current address within
    /// next `len` bytes, it will return that mapped address. In this case,
    /// the cursor will stop at the mapped address.
    ///
    /// Otherwise, it will return `None`. And the cursor may stop at any
    /// address after `len` bytes.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - **Liveness**: The cursor must be within the locked range and below the guard level.
    /// - **Liveness**: The length must be page-aligned and less than or equal to the remaining range of the cursor.
    /// ## Postconditions
    /// - If there is a mapped address after the current address within the next `len` bytes,
    ///   it will move the cursor to the next mapped address and return it.
    /// - If not, it will return `None`. The cursor may stop at any
    ///   address after `len` bytes, but it will not move past the barrier address.
    /// ## Panics
    /// This method panics if the length is longer than the remaining range of the cursor.
    /// ## Safety
    /// This function preserves all memory invariants.
    /// Because it panics rather than move the cursor to an invalid address,
    /// it ensures that the cursor is safe to use after the call.
    /// The locking mechanism prevents data races.
    #[verus_spec(res =>
        with Tracked(owner): Tracked<&mut CursorOwner<'rcu, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'rcu, UserPtConfig>>
        requires
            old(self).0.invariants(*old(owner), *old(regions), *old(guards)),
            old(self).0.level < old(self).0.guard_level,
            old(owner).in_locked_range(),
            len % PAGE_SIZE == 0,
            old(self).0.va + len <= old(self).0.barrier_va.end,
        ensures
            self.0.invariants(*owner, *regions, *guards),
            res is Some ==> {
                &&& res.unwrap() == self.0.va
                &&& owner.level < owner.guard_level
                &&& owner.in_locked_range()
            },
    )]
    pub fn find_next(&mut self, len: usize) -> (res: Option<Vaddr>) {
        #[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
        self.0.find_next(len)
    }

    /// Jump to the virtual address.
    ///
    /// This function will move the cursor to the given virtual address.
    /// If the target address is not in the locked range, it will return an error.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// The cursor must be within the locked range and below the guard level.
    /// ## Postconditions
    /// - If the target address is in the locked range, it will move the cursor to the given address.
    /// - If the target address is not in the locked range, it will return an error.
    /// ## Panics
    /// This method panics if the target address is not aligned to the page size.
    /// ## Safety
    /// This function preserves all memory invariants.
    /// Because it throws an error rather than move the cursor to an invalid address,
    /// it ensures that the cursor is safe to use after the call.
    /// The locking mechanism prevents data races.
    #[verus_spec(res =>
        with Tracked(owner): Tracked<&mut CursorOwner<'rcu, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'rcu, UserPtConfig>>
        requires
            old(self).0.invariants(*old(owner), *old(regions), *old(guards)),
            !old(self).0.jump_panic_condition(va),
        ensures
            self.0.invariants(*owner, *regions, *guards),
            self.0.barrier_va.start <= va < self.0.barrier_va.end ==> {
                &&& res is Ok
                &&& self.0.va == va
            },
            !(self.0.barrier_va.start <= va < self.0.barrier_va.end) ==> res is Err,
    )]
    pub fn jump(&mut self, va: Vaddr) -> Result<()> {
        (#[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
        self.0.jump(va))?;
        Ok(())
    }

    /// Get the virtual address of the current slot.
    pub fn virt_addr(&self) -> Vaddr
        returns
            self.0.va,
    {
        self.0.virt_addr()
    }
}
579
/// The cursor for modifying the mappings in VM space.
///
/// It exclusively owns a sub-tree of the page table, preventing others from
/// reading or modifying the same sub-tree.
pub struct CursorMut<'a, A: InAtomicMode> {
    /// The underlying mutable page-table cursor that performs the traversal.
    pub pt_cursor: crate::mm::page_table::CursorMut<'a, UserPtConfig, A>,
    // We have a read lock so the CPU set in the flusher is always a superset
    // of actual activated CPUs.
    pub flusher: TlbFlusher<'a /*, DisabledPreemptGuard*/ >,
}
590
591impl<'a, A: InAtomicMode> CursorMut<'a, A> {
    /// Preconditions for querying through a mutable cursor: the cursor is
    /// well-formed with respect to its owner, and the owner and region
    /// trackers satisfy their invariants.
    // NOTE(review): `guard_perm` is not used by this predicate — confirm
    // whether it is kept only for signature symmetry with `query_ensures`.
    pub open spec fn query_requires(
        cursor: Self,
        owner: CursorOwner<'a, UserPtConfig>,
        guard_perm: vstd::simple_pptr::PointsTo<PageTableGuard<'a, UserPtConfig>>,
        regions: MetaRegionOwners,
    ) -> bool {
        &&& cursor.pt_cursor.inner.wf(owner)
        &&& owner.inv()
        &&& regions.inv()
    }
602
    /// Postconditions for querying through a mutable cursor: the region
    /// trackers keep their invariants and the new cursor remains well-formed
    /// with respect to the owner.
    // NOTE(review): `old_cursor`, `guard_perm`, `old_regions`, and `r` are not
    // constrained by this predicate — confirm whether stronger guarantees are
    // intended here (cf. `Cursor::query_success_ensures`).
    pub open spec fn query_ensures(
        old_cursor: Self,
        new_cursor: Self,
        owner: CursorOwner<'a, UserPtConfig>,
        guard_perm: vstd::simple_pptr::PointsTo<PageTableGuard<'a, UserPtConfig>>,
        old_regions: MetaRegionOwners,
        new_regions: MetaRegionOwners,
        r: Result<(Range<Vaddr>, Option<MappedItem>)>,
    ) -> bool {
        &&& new_regions.inv()
        &&& new_cursor.pt_cursor.inner.wf(owner)
    }
615
    /// Queries the mapping at the current virtual address.
    ///
    /// This is the same as [`Cursor::query`].
    ///
    /// If the cursor is pointing to a valid virtual address that is locked,
    /// it will return the virtual address range and the mapped item.
    ///
    /// ## Preconditions
    /// - All system invariants must hold
    /// - **Liveness**: The function will return an error if the cursor is not within the locked range
    /// ## Postconditions
    /// - If there is a mapped item at the current virtual address ([`query_some_condition`]),
    ///   it is returned along with the virtual address range that it maps ([`query_success_ensures`]).
    /// - The mapping that is returned corresponds to the abstract mapping given by [`query_item_spec`](CursorView::query_item_spec).
    /// - If there is no mapped item at the current virtual address ([`query_none_condition`]),
    ///   it returns `None`, and the virtual address range of the cursor's current position.
    /// ## Safety
    /// - This function preserves all memory invariants.
    /// - The locking mechanism prevents data races.
    #[verus_spec(res =>
        with Tracked(owner): Tracked<&mut CursorOwner<'a, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'a, UserPtConfig>>
        requires
            old(self).pt_cursor.inner.invariants(*old(owner), *old(regions), *old(guards)),
            old(owner).in_locked_range(),
        ensures
            self.pt_cursor.inner.invariants(*owner, *regions, *guards),
            old(self).pt_cursor.inner.query_some_condition(*owner) ==> {
                &&& res is Ok
                &&& self.pt_cursor.inner.query_some_ensures(*owner, res.unwrap())
            },
            !old(self).pt_cursor.inner.query_some_condition(*owner) ==> {
                &&& res is Ok
                &&& self.pt_cursor.inner.query_none_ensures(*owner, res.unwrap())
            },
    )]
    pub fn query(&mut self) -> Result<(Range<Vaddr>, Option<MappedItem>)> {
        Ok(
            // Delegate to the page-table cursor, threading the tracked state through.
            #[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
            self.pt_cursor.query()?,
        )
    }
658
    /// Moves the cursor forward to the next mapped virtual address.
    ///
    /// This is the same as [`Cursor::find_next`].
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - **Liveness**: The cursor must be within the locked range and below the guard level.
    ///   The length must be page-aligned and less than or equal to the remaining range of the cursor.
    /// ## Postconditions
    /// - If there is a mapped address after the current address within the next `len` bytes,
    ///   it will move the cursor to the next mapped address and return it.
    /// - If not, it will return `None`. The cursor may stop at any
    ///   address after `len` bytes, but it will not move past the barrier address.
    /// ## Panics
    /// This method panics if the length is longer than the remaining range of the cursor.
    /// ## Safety
    /// This function preserves all memory invariants.
    /// Because it panics rather than move the cursor to an invalid address,
    /// it ensures that the cursor is safe to use after the call.
    /// The locking mechanism prevents data races.
    #[verus_spec(res =>
        with Tracked(owner): Tracked<&mut CursorOwner<'a, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'a, UserPtConfig>>
    )]
    pub fn find_next(&mut self, len: usize) -> (res: Option<Vaddr>)
        requires
            old(self).pt_cursor.inner.invariants(*old(owner), *old(regions), *old(guards)),
            old(self).pt_cursor.inner.level < old(self).pt_cursor.inner.guard_level,
            old(owner).in_locked_range(),
            len % PAGE_SIZE == 0,
            old(self).pt_cursor.inner.va + len <= old(self).pt_cursor.inner.barrier_va.end,
        ensures
            self.pt_cursor.inner.invariants(*owner, *regions, *guards),
            res is Some ==> {
                &&& res.unwrap() == self.pt_cursor.inner.va
                &&& owner.level < owner.guard_level
                &&& owner.in_locked_range()
            },
    {
        #[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
        self.pt_cursor.find_next(len)
    }
702
    /// Jump to the virtual address.
    ///
    /// This is the same as [`Cursor::jump`].
    ///
    /// This function will move the cursor to the given virtual address.
    /// If the target address is not in the locked range, it will return an error.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// The cursor must be within the locked range and below the guard level.
    /// ## Postconditions
    /// - If the target address is in the locked range, it will move the cursor to the given address.
    /// - If the target address is not in the locked range, it will return an error.
    /// ## Panics
    /// This method panics if the target address is not aligned to the page size.
    /// ## Safety
    /// This function preserves all memory invariants.
    /// Because it throws an error rather than move the cursor to an invalid address,
    /// it ensures that the cursor is safe to use after the call.
    /// The locking mechanism prevents data races.
    #[verus_spec(res =>
        with
            Tracked(owner): Tracked<&mut CursorOwner<'a, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'a, UserPtConfig>>
        requires
            old(self).pt_cursor.inner.invariants(*old(owner), *old(regions), *old(guards)),
            !old(self).pt_cursor.inner.jump_panic_condition(va),
        ensures
            self.pt_cursor.inner.invariants(*owner, *regions, *guards),
            self.pt_cursor.inner.barrier_va.start <= va < self.pt_cursor.inner.barrier_va.end ==> {
                &&& res is Ok
                &&& self.pt_cursor.inner.va == va
            },
            !(self.pt_cursor.inner.barrier_va.start <= va < self.pt_cursor.inner.barrier_va.end) ==> res is Err,
    )]
    pub fn jump(&mut self, va: Vaddr) -> Result<()> {
        (#[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
        self.pt_cursor.jump(va))?;
        Ok(())
    }
743
    /// Get the virtual address of the current slot.
    #[verus_spec(r =>
        returns
            self.pt_cursor.inner.va,
    )]
    pub fn virt_addr(&self) -> Vaddr {
        self.pt_cursor.virt_addr()
    }
752
    /// Get the dedicated TLB flusher for this cursor.
    pub fn flusher(&self) -> &TlbFlusher<'a> {
        &self.flusher
    }
757
    /// Collects the invariants of the cursor, its owner, and associated tracked structures.
    /// The cursor must be well-formed with respect to its owner. This will hold before and after the call to `map`.
    pub open spec fn map_cursor_inv(
        self,
        cursor_owner: CursorOwner<'a, UserPtConfig>,
        guards: Guards<'a, UserPtConfig>,
        regions: MetaRegionOwners,
    ) -> bool {
        &&& cursor_owner.inv()
        &&& self.pt_cursor.inner.wf(cursor_owner)
        &&& self.pt_cursor.inner.inv()
        // Locking discipline: nodes on the owner's path are locked, while
        // their children are not.
        &&& cursor_owner.children_not_locked(guards)
        &&& cursor_owner.nodes_locked(guards)
        &&& cursor_owner.relate_region(regions)
        &&& !cursor_owner.popped_too_high
        &&& regions.inv()
    }
775
    /// These conditions must hold before the call to `map` but may not be maintained after the call.
    /// The cursor must be within the locked range and below the guard level, but it may move outside the
    /// range if the frame being mapped is exactly the length of the remaining range.
    pub open spec fn map_cursor_requires(
        self,
        cursor_owner: CursorOwner<'a, UserPtConfig>,
    ) -> bool {
        // Cursor is inside the range it has locked.
        &&& cursor_owner.in_locked_range()
        // Cursor is strictly below the guard level, so mapping is permitted.
        &&& self.pt_cursor.inner.level < self.pt_cursor.inner.guard_level
        // There is at least one slot left before the barrier end.
        &&& self.pt_cursor.inner.va < self.pt_cursor.inner.barrier_va.end
    }
787
    /// Collects the conditions that must hold on the frame being mapped.
    /// The frame must be well-formed with respect to the entry owner. When converted into a `MappedItem`,
    /// its physical address must also match, and its level must be between 1 and the highest translation level.
    pub open spec fn map_item_requires(
        self,
        frame: UFrame,
        prop: PageProperty,
        entry_owner: EntryOwner<UserPtConfig>,
        regions: MetaRegionOwners,
    ) -> bool {
        let item = MappedItem { frame: frame, prop: prop };
        let (paddr, level, prop0) = UserPtConfig::item_into_raw_spec(item);
        // The raw decomposition preserves the page property.
        &&& prop == prop0
        // The entry owner's ghost frame matches the physical address and
        // property of the item being mapped.
        &&& entry_owner.frame.unwrap().mapped_pa == paddr
        &&& entry_owner.frame.unwrap().prop == prop
        &&& level <= UserPtConfig::HIGHEST_TRANSLATION_LEVEL()
        &&& 1 <= level <= NR_LEVELS // Should be property of item_into_raw
        &&& level < self.pt_cursor.inner.guard_level
        &&& Child::Frame(paddr, level, prop0).wf(entry_owner)
        // The frame fits within the remaining locked range...
        &&& self.pt_cursor.inner.va + page_size(level) <= self.pt_cursor.inner.barrier_va.end
        &&& entry_owner.inv()
        // ...and the cursor is aligned to the frame's page size.
        &&& self.pt_cursor.inner.va % page_size(level) == 0
        // The item is disjoint from every currently-mapped frame, and its
        // metadata slot lies inside the tracked regions.
        &&& crate::mm::page_table::CursorMut::<'a, UserPtConfig, A>::item_not_mapped(item, regions)
        &&& crate::mm::page_table::CursorMut::<'a, UserPtConfig, A>::item_slot_in_regions(item, regions)
    }
813
    /// The result of a call to `map`. Constructs a `Mapping` from the frame being mapped and the cursor's current virtual address.
    /// The page table's cursor view will be updated with this mapping, replacing the previous mapping (if any).
    pub open spec fn map_item_ensures(
        self,
        frame: UFrame,
        prop: PageProperty,
        old_cursor_view: CursorView<UserPtConfig>,
        cursor_view: CursorView<UserPtConfig>,
    ) -> bool {
        let item = MappedItem { frame: frame, prop: prop };
        let (paddr, level, prop0) = UserPtConfig::item_into_raw_spec(item);
        // The new view is exactly the old view with this mapping applied at
        // the cursor's virtual address, spanning `page_size(level)` bytes.
        cursor_view == old_cursor_view.map_spec(paddr, page_size(level), prop)
    }
827
    /// Map a frame into the current slot.
    ///
    /// This method will bring the cursor to the next slot after the modification.
    /// # Verified Properties
    /// ## Preconditions
    /// - The cursor must be within the locked range and below the guard level,
    ///   and the frame must fit within the remaining range of the cursor.
    /// - The cursor must satisfy all invariants, and the frame must be well-formed when converted into a `MappedItem` ([`map_item_requires`](Self::map_item_requires)).
    /// ## Postconditions
    /// After the call, the cursor will satisfy all invariants, and will map the frame into the current slot according to [`map_spec`](CursorView::map_spec).
    /// After the call, the TLB will not contain any entries for the virtual address range being mapped (TODO).
    /// ## Safety
    /// The preconditions of this function require that the frame to be mapped is disjoint from any other mapped frames.
    /// If this is not the case, the global memory invariants will be violated. If the allocator implementation is correct,
    /// the user shouldn't be able to create such a frame object in the first place, but currently a proof of that is
    /// outside of the verification boundary.
    /// Because this function flushes the TLB if it unmaps a page, it preserves TLB consistency.
    #[verus_spec(
        with Tracked(cursor_owner): Tracked<&mut CursorOwner<'a, UserPtConfig>>,
            Tracked(entry_owner): Tracked<EntryOwner<UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'a, UserPtConfig>>,
            Tracked(tlb_model): Tracked<&mut TlbModel>
        requires
            old(tlb_model).inv(),
            old(self).map_cursor_requires(*old(cursor_owner)),
            old(self).map_cursor_inv(*old(cursor_owner), *old(guards), *old(regions)),
            old(self).map_item_requires(frame, prop, entry_owner, *old(regions)),
        ensures
            self.map_cursor_inv(*cursor_owner, *guards, *regions),
            old(self).map_item_ensures(
                frame,
                prop,
                old(self).pt_cursor.inner.model(*old(cursor_owner)),
                self.pt_cursor.inner.model(*cursor_owner),
            ),
    )]
    pub fn map(&mut self, frame: UFrame, prop: PageProperty) {
        // Remember the slot address before the cursor advances, so the TLB
        // flush below targets the address that was (re-)mapped.
        let start_va = self.virt_addr();
        let item = MappedItem { frame: frame, prop: prop };

        // Re-state the disjointness/region preconditions on `item` so the
        // inner cursor's `map` precondition is provable at the call site.
        assert(crate::mm::page_table::CursorMut::<'a, UserPtConfig, A>::item_not_mapped(
            item,
            *old(regions),
        )) by { };

        assert(crate::mm::page_table::CursorMut::<'a, UserPtConfig, A>::item_slot_in_regions(
            item,
            *old(regions),
        )) by { };

        assert(self.pt_cursor.map_item_requires(item, entry_owner)) by { };

        // SAFETY: It is safe to map untyped memory into the userspace.
        let Err(frag) = (
            #[verus_spec(with Tracked(cursor_owner), Tracked(entry_owner), Tracked(regions), Tracked(guards))]
            self.pt_cursor.map(item)) else {
            return ; // No mapping exists at the current address.
        };

        // A previous mapping was replaced; flush its TLB entry before the
        // old frame is recycled.
        match frag {
            PageTableFrag::Mapped { va, item } => {
                //debug_assert_eq!(va, start_va);
                let old_frame = item.frame;

                #[verus_spec(with Tracked(tlb_model))]
                self.flusher.issue_tlb_flush_with(TlbFlushOp::Address(start_va), old_frame.into());
                #[verus_spec(with Tracked(tlb_model))]
                self.flusher.dispatch_tlb_flush();
            },
            PageTableFrag::StrayPageTable { .. } => {
                // Unreachable: per `item_into_raw_spec`, a `UFrame` item maps
                // at level 1, so it can never replace a whole child page
                // table. The proof below derives `false` from that fact.
                assert(false) by {
                    assert(UserPtConfig::item_into_raw(item).1 == 1);
                };
                //panic!("`UFrame` is base page sized but re-mapping out a child PT");
            },
        }
    }
906
    /// Clears the mapping starting from the current slot,
    /// and returns the number of unmapped pages.
    ///
    /// This method will bring the cursor forward by `len` bytes in the virtual
    /// address space after the modification.
    ///
    /// Already-absent mappings encountered by the cursor will be skipped. It
    /// is valid to unmap a range that is not mapped.
    ///
    /// It must issue and dispatch a TLB flush after the operation. Otherwise,
    /// the memory safety will be compromised. Please call this function less
    /// to avoid the overhead of TLB flush. Using a large `len` is wiser than
    /// splitting the operation into multiple small ones.
    ///
    /// # Panics
    /// Panics if:
    /// - the length is longer than the remaining range of the cursor;
    /// - the length is not page-aligned.
    #[verus_spec(r =>
        with Tracked(cursor_owner): Tracked<&mut CursorOwner<'a, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'a, UserPtConfig>>,
            Tracked(tlb_model): Tracked<&mut TlbModel>
        requires
            len > 0,
            len % PAGE_SIZE == 0,
            old(self).pt_cursor.inner.va % PAGE_SIZE == 0,
            old(self).pt_cursor.inner.va + len <= KERNEL_VADDR_RANGE.end as int,
            old(self).pt_cursor.inner.invariants(*old(cursor_owner), *old(regions), *old(guards)),
            old(cursor_owner).in_locked_range(),
            old(self).pt_cursor.inner.va + len <= old(self).pt_cursor.inner.barrier_va.end,
            old(tlb_model).inv(),
        ensures
            self.pt_cursor.inner.invariants(*cursor_owner, *regions, *guards),
            old(self).pt_cursor.inner.model(*old(cursor_owner)).unmap_spec(len, self.pt_cursor.inner.model(*cursor_owner), r),
            tlb_model.inv(),
    )]
    pub fn unmap(&mut self, len: usize) -> usize {
        // Establish that the ghost owner's view of the cursor VA agrees with
        // the executable cursor before the loop starts.
        proof {
            assert(cursor_owner.va.reflect(self.pt_cursor.inner.va));
            assert(cursor_owner.inv());
            cursor_owner.va.reflect_prop(self.pt_cursor.inner.va);
        }

        let end_va = self.virt_addr() + len;
        let mut num_unmapped: usize = 0;

        let ghost start_mappings: Set<Mapping> = cursor_owner@.mappings;
        let ghost start_va: Vaddr = cursor_owner@.cur_va;
        // The "adjusted base" accumulates splits: starts as the split-at-boundaries
        // version of start_mappings and gets updated when take_next splits huge pages.
        let ghost mut adjusted_base: Set<Mapping> = cursor_owner@.mappings;

        proof {
            assert((self.pt_cursor.inner.va + len) % PAGE_SIZE as int == 0) by (compute);

            assert(end_va as int == start_va + len);
            // TODO(verification): finiteness of the mapping set is assumed;
            // it should follow from the owner's invariant.
            assume(adjusted_base.finite());

            // At loop entry: cursor_owner@.cur_va == start_va, so filter range is empty
            assert(cursor_owner@.cur_va == start_va);
            // The filter [start_va, start_va) contains nothing
            assert(adjusted_base.filter(|m: Mapping| start_va <= m.va_range.start < start_va)
                =~= Set::<Mapping>::empty());
            // So difference with empty set is the set itself
            assert(adjusted_base.difference(Set::<Mapping>::empty()) =~= adjusted_base);
        }

        // Main loop: repeatedly take the next mapped fragment in
        // [cursor, end_va), flush its TLB entries, and count unmapped pages.
        #[verus_spec(
            invariant
                self.pt_cursor.inner.va % PAGE_SIZE == 0,
                end_va % PAGE_SIZE == 0,
                self.pt_cursor.inner.invariants(*cursor_owner, *regions, *guards),
                end_va <= self.pt_cursor.inner.barrier_va.end,
                tlb_model.inv(),
                start_va <= cursor_owner@.cur_va,
                // Split-aware invariant: adjusted_base tracks accumulated splits
                cursor_owner@.mappings =~= adjusted_base.difference(
                    adjusted_base.filter(|m: Mapping| start_va <= m.va_range.start < cursor_owner@.cur_va)),
                num_unmapped as nat == adjusted_base.filter(
                    |m: Mapping| start_va <= m.va_range.start < cursor_owner@.cur_va).len(),
                start_va == old(cursor_owner)@.cur_va,
                adjusted_base.finite(),
            invariant_except_break
                self.pt_cursor.inner.va <= end_va,
                cursor_owner.in_locked_range(),
            ensures
                self.pt_cursor.inner.va >= end_va,
            decreases end_va - self.pt_cursor.inner.va
        )]
        loop {
            // Snapshot ghost state from before this iteration's take_next,
            // used below to relate the pre- and post-call mapping sets.
            let ghost prev_va: Vaddr = cursor_owner@.cur_va;
            let ghost prev_mappings: Set<Mapping> = cursor_owner@.mappings;
            let ghost num_unmapped_before: nat = num_unmapped as nat;

            proof {
                assert(self.pt_cursor.inner.wf(*cursor_owner));
                assert(cursor_owner.inv());
                cursor_owner.va.reflect_prop(self.pt_cursor.inner.va);
                assert(prev_va == self.pt_cursor.inner.va);
            }

            // SAFETY: It is safe to un-map memory in the userspace.
            let Some(frag) = (unsafe {
                #[verus_spec(with Tracked(cursor_owner), Tracked(regions), Tracked(guards))]
                self.pt_cursor.take_next(end_va - self.virt_addr())
            }) else {
                proof {
                    assert(self.pt_cursor.inner.wf(*cursor_owner));
                    assert(cursor_owner.inv());
                    cursor_owner.va.reflect_prop(self.pt_cursor.inner.va);

                    // take_next None: cursor may overshoot end_va (absent entry at higher level)
                    assert(cursor_owner@.cur_va >= end_va);

                    // take_next returned None: mappings preserved, no mappings in scanned range.
                    // The adjusted_base invariant is maintained since no new splits occurred.
                    // The filter [start_va, cursor_va) includes [start_va, end_va) plus the
                    // overshoot range [end_va, cursor_va). Since take_next guarantees no mappings
                    // in [prev_va, end_va), and no splits occurred, we use end_va as the bound.
                    assume(cursor_owner@.mappings =~= adjusted_base.difference(
                        adjusted_base.filter(|m: Mapping| start_va <= m.va_range.start < cursor_owner@.cur_va)));
                    assume(num_unmapped as nat == adjusted_base.filter(
                        |m: Mapping| start_va <= m.va_range.start < cursor_owner@.cur_va).len());
                }
                break ;
            };

            proof {
                // Re-establish reflect for post-call state
                assert(self.pt_cursor.inner.wf(*cursor_owner));
                assert(cursor_owner.inv());
                cursor_owner.va.reflect_prop(self.pt_cursor.inner.va);

                let new_va = cursor_owner@.cur_va;

                // take_next returned Some. Its postcondition (per-variant):
                //   Mapped: owner@.mappings = view.split_while_huge(m.page_size).mappings - set![m]
                //     where view has cur_va = found_va, mappings = old_mappings
                //   StrayPageTable: owner@.mappings = old_mappings - subtree_filter
                // We merge this with the accumulated adjusted_base.
                let ghost old_adjusted = adjusted_base;
                // TODO(verification): existence of a suitable split-adjusted
                // base set is assumed; it should be derivable from the
                // postcondition of `take_next`.
                assume(exists |new_base: Set<Mapping>| {
                    &&& #[trigger] new_base.finite()
                    &&& cursor_owner@.mappings =~= new_base.difference(
                        new_base.filter(|m: Mapping| start_va <= m.va_range.start < new_va))
                    &&& num_unmapped_before + (
                        new_base.filter(|m: Mapping| prev_va <= m.va_range.start < new_va).len())
                        == new_base.filter(|m: Mapping| start_va <= m.va_range.start < new_va).len()
                });
                // Instantiate and update adjusted_base
                let ghost new_base = choose |new_base: Set<Mapping>| {
                    &&& #[trigger] new_base.finite()
                    &&& cursor_owner@.mappings =~= new_base.difference(
                        new_base.filter(|m: Mapping| start_va <= m.va_range.start < new_va))
                    &&& num_unmapped_before + (
                        new_base.filter(|m: Mapping| prev_va <= m.va_range.start < new_va).len())
                        == new_base.filter(|m: Mapping| start_va <= m.va_range.start < new_va).len()
                };
                adjusted_base = new_base;
            }

            // Count the pages removed by this fragment and queue the
            // corresponding TLB flush (dispatched once after the loop).
            let ghost mut step_delta: nat = 0;
            match frag {
                PageTableFrag::Mapped { va, item, .. } => {
                    let frame = item.frame;
                    // TODO(verification): overflow freedom is assumed here.
                    assume(num_unmapped < usize::MAX);
                    num_unmapped += 1;
                    proof {
                        step_delta = 1;
                    }
                    #[verus_spec(with Tracked(tlb_model))]
                    self.flusher.issue_tlb_flush_with(TlbFlushOp::Address(va), frame.into());
                },
                PageTableFrag::StrayPageTable { pt, va, len, num_frames } => {
                    // A whole sub-page-table was detached; flush its entire
                    // VA range rather than individual addresses.
                    assume(num_unmapped + num_frames < usize::MAX);
                    num_unmapped += num_frames;
                    proof {
                        step_delta = num_frames as nat;
                    }
                    assume(va + len <= usize::MAX);
                    #[verus_spec(with Tracked(tlb_model))]
                    self.flusher.issue_tlb_flush_with(TlbFlushOp::Range(va..va + len), pt);
                },
            }

            proof {
                // TODO(verification): re-establish the counting invariant
                // after this step; should follow from the chosen new_base.
                assume(num_unmapped as nat == adjusted_base.filter(
                    |m: Mapping| start_va <= m.va_range.start < cursor_owner@.cur_va,
                ).len());
            }
        }

        proof {
            cursor_owner.va.reflect_prop(self.pt_cursor.inner.va);

            // Bridge from loop invariant to unmap_spec postcondition.
            // The loop invariant uses adjusted_base; unmap_spec uses split_while_huge at boundaries.
            // These should coincide but the connection requires deep reasoning about splits.
            let old_view = old(self).pt_cursor.inner.model(*old(cursor_owner));
            let new_view = self.pt_cursor.inner.model(*cursor_owner);
            assume(old_view.unmap_spec(len, new_view, num_unmapped));
        }

        // Dispatch all flushes queued during the loop; required for memory
        // safety (see the doc comment above).
        #[verus_spec(with Tracked(tlb_model))]
        self.flusher.dispatch_tlb_flush();

        num_unmapped
    }
1116
    /// Applies the operation to the next slot of mapping within the range.
    ///
    /// The range to be found in is the current virtual address with the
    /// provided length.
    ///
    /// The function stops and yields the actually protected range if it has
    /// actually protected a page, no matter if the following pages are also
    /// required to be protected.
    ///
    /// It also makes the cursor moves forward to the next page after the
    /// protected one. If no mapped pages exist in the following range, the
    /// cursor will stop at the end of the range and return [`None`].
    ///
    /// Note that it will **NOT** flush the TLB after the operation. Please
    /// make the decision yourself on when and how to flush the TLB using
    /// [`Self::flusher`].
    ///
    /// # Panics
    ///
    /// Panics if the length is longer than the remaining range of the cursor.
    // NOTE(review): the `ensures` clause below is intentionally empty — the
    // postconditions of this operation are not yet specified/verified.
    #[verus_spec(r =>
        with Tracked(owner): Tracked<&mut CursorOwner<'a, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'a, UserPtConfig>>,
        requires
            old(regions).inv(),
            old(owner).inv(),
            // The current entry must be an existing frame mapping.
            !old(owner).cur_entry_owner().is_absent(),
            old(self).pt_cursor.inner.wf(*old(owner)),
            old(self).pt_cursor.inner.inv(),
            old(owner).in_locked_range(),
            !old(owner).popped_too_high,
            old(owner).children_not_locked(*old(guards)),
            old(owner).nodes_locked(*old(guards)),
            old(owner).relate_region(*old(regions)),
            len % PAGE_SIZE == 0,
            old(self).pt_cursor.inner.level < NR_LEVELS,
            old(self).pt_cursor.inner.va + len <= old(self).pt_cursor.inner.barrier_va.end,
            old(owner).cur_entry_owner().is_frame(),
            // The caller-supplied `op` must accept the current page property.
            op.requires((old(owner).cur_entry_owner().frame.unwrap().prop,)),
        ensures
    )]
    pub fn protect_next(
        &mut self,
        len: usize,
        op: impl FnOnce(PageProperty) -> PageProperty,
    ) -> Option<Range<Vaddr>> {
        // SAFETY: It is safe to protect memory in the userspace.
        unsafe {
            #[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
            self.pt_cursor.protect_next(len, op)
        }
    }
1170}
1171
1172/*cpu_local_cell! {
1173 /// The `Arc` pointer to the activated VM space on this CPU. If the pointer
1174 /// is NULL, it means that the activated page table is merely the kernel
1175 /// page table.
1176 // TODO: If we are enabling ASID, we need to maintain the TLB state of each
1177 // CPU, rather than merely the activated `VmSpace`. When ASID is enabled,
1178 // the non-active `VmSpace`s can still have their TLB entries in the CPU!
1179 static ACTIVATED_VM_SPACE: *const VmSpace = core::ptr::null();
1180}*/
1181
1182/*#[cfg(ktest)]
1183pub(super) fn get_activated_vm_space() -> *const VmSpace {
1184 ACTIVATED_VM_SPACE.load()
1185}*/
1186
/// The configuration for user page tables.
///
/// A zero-sized marker type; its `PageTableConfig` implementation (below)
/// restricts the top-level index range to `0..256` (the user half) and maps
/// [`MappedItem`]s at the base page level.
#[derive(Clone, Debug)]
pub struct UserPtConfig {}
1190
/// The item that can be mapped into the [`VmSpace`].
pub struct MappedItem {
    /// The untyped frame backing the mapping.
    pub frame: UFrame,
    /// The page property applied to the mapping.
    pub prop: PageProperty,
}
1196
#[verus_verify]
impl RCClone for MappedItem {

    /// Precondition for `clone`: delegates entirely to the frame's own
    /// clone requirements on the metadata-slot and reference-count
    /// permissions.
    open spec fn clone_requires(self, slot_perm: PointsTo<MetaSlot>, rc_perm: PermissionU64) -> bool {
        self.frame.clone_requires(slot_perm, rc_perm)
    }

    /// Clones the item by cloning the frame (which consumes/updates the
    /// tracked reference-count permission) and copying the page property.
    fn clone(&self, Tracked(slot_perm): Tracked<&PointsTo<MetaSlot>>, Tracked(rc_perm): Tracked<&mut PermissionU64>) -> (res: Self)
    {
        let frame = self.frame.clone(Tracked(slot_perm), Tracked(rc_perm));
        Self {
            frame,
            prop: self.prop,
        }
    }
}
1213
// SAFETY: `item_into_raw` and `item_from_raw` are implemented correctly:
// `item_from_raw` reconstructs exactly the item previously decomposed by
// `item_into_raw` (see the `item_roundtrip` axiom in the impl).
unsafe impl PageTableConfig for UserPtConfig {
    // Spec-level top-level index range: the lower (user) half of the
    // root page table, indices 0..256.
    open spec fn TOP_LEVEL_INDEX_RANGE_spec() -> Range<usize> {
        0..256
    }

    open spec fn TOP_LEVEL_CAN_UNMAP_spec() -> (b: bool) {
        true
    }

    // Executable counterparts must return exactly what the specs above say.
    fn TOP_LEVEL_INDEX_RANGE() -> Range<usize> {
        0..256
    }

    fn TOP_LEVEL_CAN_UNMAP() -> (b: bool) {
        true
    }

    type E = PageTableEntry;

    type C = PagingConsts;

    type Item = MappedItem;

    // NOTE(review): the spec fixes the mapping level to 1 (base pages),
    // while the executable `item_into_raw` below uses `frame.map_level()`.
    // This is consistent only if `UFrame::map_level()` is always 1 — confirm.
    open spec fn item_into_raw_spec(item: Self::Item) -> (Paddr, PagingLevel, PageProperty) {
        (item.frame.paddr(), 1, item.prop)
    }

    // Decomposes an item into its raw parts, forgetting the frame
    // (`into_raw`) so its reference count is not dropped.
    #[verifier::external_body]
    fn item_into_raw(item: Self::Item) -> (Paddr, PagingLevel, PageProperty) {
        let MappedItem { frame, prop } = item;
        let level = frame.map_level();
        let paddr = frame.into_raw();
        (paddr, level, prop)
    }

    // The spec-level inverse of `item_into_raw_spec` is left uninterpreted;
    // its round-trip behavior is pinned by the `item_roundtrip` axiom below.
    uninterp spec fn item_from_raw_spec(
        paddr: Paddr,
        level: PagingLevel,
        prop: PageProperty,
    ) -> Self::Item;

    // Reconstructs an item from raw parts produced by `item_into_raw`.
    #[verifier::external_body]
    fn item_from_raw(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self::Item {
        let frame = unsafe { UFrame::from_raw(paddr) };
        MappedItem { frame, prop }
    }

    axiom fn axiom_nr_subpage_per_huge_eq_nr_entries();

    // Axiom: `item_from_raw_spec` inverts `item_into_raw_spec`.
    axiom fn item_roundtrip(item: Self::Item, paddr: Paddr, level: PagingLevel, prop: PageProperty);
}
1266
1267} // verus!