ostd/specs/mm/
virt_mem_newer.rs

1//! Virtual-memory specification model used by [`VmSpace`] proofs.
2//!
3//! This module defines:
4//! - [`VirtPtr`], a specification-level virtual pointer over a bounded virtual range;
5//! - [`FrameContents`], a physical-frame content model with alignment and range invariants;
6//! - [`MemView`], a projection of virtual-to-physical mappings and frame contents that supports
7//!   translation, read/write reasoning, borrowing, splitting, and joining.
8//!
9//! The model is intentionally verification-oriented (Verus specs/proofs) and is used by
10//! [`crate::mm::vm_space`] to state and prove reader/writer permission isolation as well
11//! as correctness of low-level virtual memory operations that are used by them.
12//!
13//! [`VmSpace`]: crate::mm::vm_space::VmSpace
14
15use vstd::pervasive::arbitrary;
16use vstd::prelude::*;
17
18use vstd::layout;
19use vstd::raw_ptr;
20use vstd::set;
21use vstd::set_lib;
22
23use core::marker::PhantomData;
24use core::ops::Range;
25
26use crate::mm::{Paddr, Vaddr};
27use crate::prelude::Inv;
28use crate::specs::arch::mm::MAX_PADDR;
29use crate::specs::mm::page_table::Mapping;
30
31#[path = "virt_mem_example.rs"]
32mod virt_mem_example;
33
34verus! {
35
/// Specification-level virtual pointer with an associated half-open range.
///
/// [`VirtPtr`] models a C-like pointer used by verification code:
/// - `vaddr` is the current cursor position;
/// - `range` is the allocation-like bounds `[start, end)`.
///
/// Most operations require `is_valid()` (i.e., the current pointer is within
/// range and not one-past-end), while offset operations additionally require
/// the computed address to stay inside the same range.
pub struct VirtPtr {
    /// Current virtual address represented by this pointer value.
    pub vaddr: Vaddr,
    /// Logical bounds of the pointer's valid object/range, modeled as the
    /// half-open interval `[start, end)`. Ghost-only: erased at runtime.
    pub ghost range: Ghost<Range<Vaddr>>,
}
51
/// Byte contents of one physical frame range tracked in a [`MemView`].
///
/// This is the physical-memory side of the model: each frame base `Paddr` maps
/// to a [`FrameContents`] value whose metadata (`size`, `range`) constrains how
/// `contents` can be interpreted.
pub struct FrameContents {
    /// Per-byte initialization/value state for this frame; each entry is
    /// either uninitialized or an initialized byte value.
    pub contents: Seq<raw_ptr::MemContents<u8>>,
    /// Frame size in bytes (ghost; must equal `contents.len()` per [`Inv`]).
    pub ghost size: Ghost<usize>,
    /// Physical range covered by this frame, modeled as `[start, end)`.
    pub ghost range: Ghost<Range<Paddr>>,
}
65
impl Inv for FrameContents {
    /// Well-formedness invariant for [`FrameContents`].
    ///
    /// # Invariant
    /// - `contents.len() == size`.
    /// - `size == range.end - range.start`.
    /// - `range.start` and `range.end` are aligned to `size`.
    /// - `range` is ordered and remains below [`MAX_PADDR`].
    ///
    /// NOTE(review): if `size@ == 0`, the alignment conjuncts take `% 0` in
    /// spec arithmetic — confirm a zero-size frame is either impossible or
    /// intended to satisfy this. Also, `range.end < MAX_PADDR` (strict)
    /// excludes a frame ending exactly at `MAX_PADDR` — verify that is the
    /// intended bound rather than `<=`.
    open spec fn inv(self) -> bool {
        &&& self.contents.len() == self.size@
        &&& self.size@ == self.range@.end - self.range@.start
        &&& self.range@.start % self.size@ == 0
        &&& self.range@.end % self.size@ == 0
        &&& self.range@.start <= self.range@.end < MAX_PADDR
    }
}
82
83
/// A local virtual-memory view used in proofs.
///
/// A [`MemView`] pairs:
/// - [`Self::mappings`]: a set of [`Mapping`] entries used by [`Self::addr_transl`];
/// - [`Self::memory`]: frame bytes keyed by physical frame base (`Paddr`) via [`FrameContents`].
///
/// In practice this view is obtained from [`GlobalMemView`] using
/// [`GlobalMemView::take_view`], then consumed by APIs such as [`VirtPtr::read`],
/// [`VirtPtr::write`], and higher-level ownership proofs in
/// [`VmSpaceOwner`].
///
/// [`VmSpaceOwner`]: crate::mm::vm_space::vm_space_specs::VmSpaceOwner
pub tracked struct MemView {
    /// Virtual-to-physical mapping set used for address translation.
    pub mappings: Set<Mapping>,
    /// Physical frame contents for mapped pages referenced by [`Self::mappings`].
    pub memory: Map<Paddr, FrameContents>
}
102
103impl MemView {
104    /// Translates a virtual address to `(frame_base_pa, frame_offset)`.
105    ///
106    /// Returns [`Option::None`] if no mapping in [`Self::mappings`] covers `va`.
107    pub open spec fn addr_transl(self, va: usize) -> Option<(usize, usize)> {
108        let mappings = self.mappings.filter(|m: Mapping| m.va_range.start <= va < m.va_range.end);
109        if 0 < mappings.len() {
110            let m = mappings.choose();  // In a well-formed PT there will only be one, but if malformed this is non-deterministic!
111            let off = va - m.va_range.start;
112            Some((m.pa_range.start, off as usize))
113        } else {
114            None
115        }
116    }
117
118    /// Specification read through virtual translation.
119    ///
120    /// Equivalent to resolving via [`Self::addr_transl`] and reading from [`Self::memory`].
121    pub open spec fn read(self, va: usize) -> raw_ptr::MemContents<u8> {
122        let (pa, off) = self.addr_transl(va).unwrap();
123        self.memory[pa].contents[off as int]
124    }
125
126    /// Specification write through virtual translation.
127    ///
128    /// Returns a new [`MemView`] with one byte updated, preserving all mappings.
129    pub open spec fn write(self, va: usize, x: u8) -> Self {
130        let (pa, off) = self.addr_transl(va).unwrap();
131        MemView {
132            memory: self.memory.insert(pa, FrameContents {
133                contents: self.memory[pa].contents.update(off as int, raw_ptr::MemContents::Init(x)),
134                size: self.memory[pa].size,
135                range: self.memory[pa].range,
136            }),
137            ..self
138        }
139    }
140
141    /// Whether two virtual addresses denote equal byte contents in this view.
142    pub open spec fn eq_at(self, va1: usize, va2: usize) -> bool {
143        let (pa1, off1) = self.addr_transl(va1).unwrap();
144        let (pa2, off2) = self.addr_transl(va2).unwrap();
145        self.memory[pa1].contents[off1 as int] == self.memory[pa2].contents[off2 as int]
146    }
147
148    /// Whether `va` is translated and mapped to frame base `pa`.
149    pub open spec fn is_mapped(self, va: usize, pa: usize) -> bool {
150        self.addr_transl(va) is Some && self.addr_transl(va).unwrap().0 == pa
151    }
152
153    /// Specification for borrowing a sub-view covering `[vaddr, vaddr + len)`.
154    ///
155    /// The result keeps overlapping mappings and restricts memory to physical
156    /// frames reachable from that virtual range.
157    pub open spec fn borrow_at_spec(&self, vaddr: usize, len: usize) -> MemView {
158        let range_end = vaddr + len;
159
160        let valid_pas = Set::new(
161            |pa: usize|
162                exists|va: usize|
163                    vaddr <= va < range_end && #[trigger] self.is_mapped(va, pa),
164        );
165
166        MemView {
167            mappings: self.mappings.filter(
168                |m: Mapping| m.va_range.start < range_end && m.va_range.end > vaddr,
169            ),
170            memory: self.memory.restrict(valid_pas),
171        }
172    }
173
174    /// Whether mappings in this view are pairwise VA-disjoint.
175    pub open spec fn mappings_are_disjoint(self) -> bool {
176        forall|m1: Mapping, m2: Mapping|
177            #![trigger self.mappings.contains(m1), self.mappings.contains(m2)]
178            self.mappings.contains(m1) && self.mappings.contains(m2) && m1 != m2 ==> {
179                m1.va_range.end <= m2.va_range.start || m2.va_range.end <= m1.va_range.start
180            }
181    }
182
183    /// Specification for splitting this view at `split_end = vaddr + len`.
184    ///
185    /// Returns `(left, right)` where:
186    /// - `left` covers `[vaddr, split_end)`,
187    /// - `right` covers addresses `>= split_end`.
188    pub open spec fn split_spec(self, vaddr: usize, len: usize) -> (MemView, MemView) {
189        let split_end = vaddr + len;
190
191        // The left part.
192        let left_mappings = self.mappings.filter(
193            |m: Mapping| m.va_range.start < split_end && m.va_range.end > vaddr,
194        );
195        let right_mappings = self.mappings.filter(|m: Mapping| m.va_range.end > split_end);
196
197        let left_pas = Set::new(
198            |pa: usize|
199                exists|va: usize| vaddr <= va < split_end && self.is_mapped(va, pa),
200        );
201        let right_pas = Set::new(
202            |pa: usize| exists|va: usize| va >= split_end && self.is_mapped(va, pa),
203        );
204
205        (
206            MemView { mappings: left_mappings, memory: self.memory.restrict(left_pas) },
207            MemView { mappings: right_mappings, memory: self.memory.restrict(right_pas) },
208        )
209    }
210
211    /// Executable proof wrapper of [`Self::borrow_at_spec`].
212    ///
213    /// # Verified Properties
214    ///
215    /// ## Postconditions
216    /// - `r == self.borrow_at_spec(vaddr, len)`.
217    #[verifier::external_body]
218    pub proof fn borrow_at(tracked &self, vaddr: usize, len: usize) -> (tracked r: &MemView)
219        ensures
220            r == self.borrow_at_spec(vaddr, len),
221    {
222        unimplemented!()
223    }
224
225
226    /// Executable proof wrapper of [`Self::split_spec`].
227    ///
228    /// # Verified Properties
229    ///
230    /// ## Postconditions
231    /// - `r == self.split_spec(vaddr, len)`.
232    #[verifier::external_body]
233    pub proof fn split(tracked self, vaddr: usize, len: usize) -> (tracked r: (Self, Self))
234        ensures
235            r == self.split_spec(vaddr, len),
236    {
237        unimplemented!()
238    }
239
240    /// Lemma: [`Self::split_spec`] preserves translation semantics on each side.
241    ///
242    /// # Verified Properties
243    ///
244    /// ## Preconditions
245    /// - `original.split_spec(vaddr, len) == (left, right)`.
246    ///
247    /// ## Postconditions
248    /// - `right.memory.dom().subset_of(original.memory.dom())`.
249    /// - For any `va` in `[vaddr, vaddr + len)`,
250    ///   `original.addr_transl(va) == left.addr_transl(va)`.
251    /// - For any `va >= vaddr + len`,
252    ///   `original.addr_transl(va) == right.addr_transl(va)`.
253    pub proof fn lemma_split_preserves_transl(
254        original: MemView,
255        vaddr: usize,
256        len: usize,
257        left: MemView,
258        right: MemView,
259    )
260        requires
261            original.split_spec(vaddr, len) == (left, right),
262        ensures
263            right.memory.dom().subset_of(original.memory.dom()),
264            forall|va: usize|
265                vaddr <= va < vaddr + len ==> {
266                    #[trigger] original.addr_transl(va) == left.addr_transl(va)
267                },
268            forall|va: usize|
269                va >= vaddr + len ==> {
270                    #[trigger] original.addr_transl(va) == right.addr_transl(va)
271                },
272    {
273        // Auto.
274        assert(right.memory.dom().subset_of(original.memory.dom()));
275
276        assert forall|va: usize| vaddr <= va < vaddr + len implies original.addr_transl(va)
277            == left.addr_transl(va) by {
278            assert(left.mappings =~= original.mappings.filter(
279                |m: Mapping| m.va_range.start < vaddr + len && m.va_range.end > vaddr,
280            ));
281            let o_mappings = original.mappings.filter(
282                |m: Mapping| m.va_range.start <= va < m.va_range.end,
283            );
284            let l_mappings = left.mappings.filter(
285                |m: Mapping| m.va_range.start <= va < m.va_range.end,
286            );
287
288            assert(l_mappings.subset_of(o_mappings));
289            assert(o_mappings.subset_of(l_mappings)) by {
290                assert forall|m: Mapping| #[trigger]
291                    o_mappings.contains(m) implies l_mappings.contains(m) by {
292                    assume(o_mappings.contains(m));
293                    assert(m.va_range.start < vaddr + len);
294                    assert(m.va_range.end > vaddr);
295                    assert(m.va_range.start <= va < m.va_range.end);
296                    assert(left.mappings.contains(m));
297                }
298            };
299
300            assert(o_mappings =~= l_mappings);
301        }
302
303        assert forall|va: usize| va >= vaddr + len implies original.addr_transl(va)
304            == right.addr_transl(va) by {
305            let split_end = vaddr + len;
306
307            let o_mappings = original.mappings.filter(
308                |m: Mapping| m.va_range.start <= va < m.va_range.end,
309            );
310            let r_mappings = right.mappings.filter(
311                |m: Mapping| m.va_range.start <= va < m.va_range.end,
312            );
313
314            assert forall|m: Mapping| o_mappings.contains(m) implies r_mappings.contains(m) by {
315                assert(m.va_range.end > va);
316                assert(va >= split_end);
317                assert(m.va_range.end > split_end);
318
319                assert(right.mappings.contains(m));
320                assert(r_mappings.contains(m));
321            }
322
323            assert(o_mappings =~= r_mappings);
324        }
325    }
326
327    /// Specification for merging two views.
328    ///
329    /// Mappings are unioned, and memory conflicts are resolved by
330    /// [`vstd::map::Map::union_prefer_right`] with `other` taking precedence.
331    pub open spec fn join_spec(self, other: MemView) -> MemView {
332        MemView {
333            mappings: self.mappings.union(other.mappings),
334            memory: self.memory.union_prefer_right(other.memory),
335        }
336    }
337
338    /// Executable proof wrapper of [`Self::join_spec`].
339    ///
340    /// # Verified Properties
341    ///
342    /// ## Preconditions
343    /// - `old(self).mappings.disjoint(other.mappings)`.
344    ///
345    /// ## Postconditions
346    /// - `*self == old(self).join_spec(other)`.
347    #[verifier::external_body]
348    pub proof fn join(tracked &mut self, tracked other: Self)
349        requires
350            old(self).mappings.disjoint(other.mappings),
351        ensures
352            *self == old(self).join_spec(other),
353    {
354        unimplemented!()
355    }
356
357    /// Lemma: splitting and then joining reconstructs the original view.
358    ///
359    /// # Verified Properties
360    ///
361    /// ## Preconditions
362    /// - `this.split_spec(vaddr, len) == (lhs, rhs)`.
363    /// - Every mapping in `this` starts at or after `vaddr`.
364    /// - Every physical frame tracked by `this.memory` is reachable from some
365    ///   virtual address at or after `vaddr`.
366    ///
367    /// ## Postconditions
368    /// - `lhs.join_spec(rhs)` has the same mappings and memory contents as `this`.
369    pub proof fn lemma_split_join_identity(
370        this: MemView,
371        lhs: MemView,
372        rhs: MemView,
373        vaddr: usize,
374        len: usize,
375    )
376        requires
377            this.split_spec(vaddr, len) == (lhs, rhs),
378            forall|m: Mapping|
379                #[trigger] this.mappings.contains(m) ==> vaddr <= m.va_range.start < m.va_range.end,
380            forall|pa: Paddr|
381                #[trigger] this.memory.contains_key(pa) ==> exists|va: usize|
382                    vaddr <= va && #[trigger] this.is_mapped(va, pa),
383        ensures
384            this.mappings =~= lhs.join_spec(rhs).mappings,
385            this.memory =~= lhs.join_spec(rhs).memory,
386    {
387    }
388}
389
impl Inv for VirtPtr {
    /// Well-formedness invariant for [`VirtPtr`].
    ///
    /// # Invariant
    /// - `vaddr` lies within `[range.start, range.end]`; the upper bound is
    ///   inclusive so the one-past-end cursor position is representable.
    /// - `range.start > 0`, i.e. a well-formed pointer is never null.
    /// - `range` is ordered (`start <= end`).
    ///
    /// NOTE(review): the last conjunct is already implied by the first;
    /// it appears to be kept only for readability.
    open spec fn inv(self) -> bool {
        &&& self.range@.start <= self.vaddr <= self.range@.end
        &&& self.range@.start > 0
        &&& self.range@.end >= self.range@.start
    }
}
397
398impl Clone for VirtPtr {
399    fn clone(&self) -> (res: Self)
400        ensures
401            res == self,
402    {
403        Self { vaddr: self.vaddr, range: Ghost(self.range@) }
404    }
405}
406
407impl Copy for VirtPtr {
408
409}
410
impl VirtPtr {
    /// Pure constructor specification for [`Self::new`].
    ///
    /// The range end is `(vaddr + len) as usize`; the cast truncates on
    /// overflow, which callers are expected to rule out.
    pub open spec fn new_spec(vaddr: Vaddr, len: usize) -> Self {
        Self { vaddr, range: Ghost(Range { start: vaddr, end: (vaddr + len) as usize }) }
    }

    /// Creates a pointer at `vaddr` with logical range `[vaddr, vaddr + len)`.
    ///
    /// # Verified Properties
    ///
    /// ## Postconditions
    /// - `result == Self::new_spec(vaddr, len)`.
    pub fn new(vaddr: Vaddr, len: usize) -> Self
        returns Self::new_spec(vaddr, len),
    {
        Self { vaddr, range: Ghost(Range { start: vaddr, end: (vaddr + len) as usize }) }
    }

    /// Whether the pointer has a non-null address and consistent bounds.
    pub open spec fn is_defined(self) -> bool {
        &&& self.vaddr != 0
        &&& self.range@.start <= self.vaddr <= self.range@.end
    }

    /// Whether dereferencing the current pointer position is allowed.
    ///
    /// This excludes the one-past-end position (`vaddr == range.end`).
    pub open spec fn is_valid(self) -> bool {
        &&& self.is_defined()
        &&& self.vaddr < self.range@.end
    }

    /// Reads one byte from `self.vaddr` in the provided memory view.
    ///
    /// NOTE: `external_body` with an `unimplemented!()` body — this is a
    /// trusted model primitive; it will panic if ever actually executed.
    ///
    /// # Verified Properties
    ///
    /// ## Preconditions
    /// - `mem.addr_transl(self.vaddr) is Some`.
    /// - Target byte is initialized:
    ///   `mem.memory[mem.addr_transl(self.vaddr).unwrap().0].contents[mem.addr_transl(self.vaddr).unwrap().1 as int] is Init`.
    /// - `self.is_valid()`.
    ///
    /// ## Postconditions
    /// - Returns `mem.read(self.vaddr).value()`.
    #[verifier::external_body]
    pub fn read(self, Tracked(mem): Tracked<&MemView>) -> u8
        requires
            mem.addr_transl(self.vaddr) is Some,
            mem.memory[mem.addr_transl(self.vaddr).unwrap().0].contents[mem.addr_transl(self.vaddr).unwrap().1 as int] is Init,
            self.is_valid(),
        returns
            mem.read(self.vaddr).value(),
    {
        unimplemented!()
    }

    /// Writes one byte to `self.vaddr` in the provided memory view.
    ///
    /// NOTE: `external_body` with an `unimplemented!()` body — trusted model
    /// primitive, like [`Self::read`].
    ///
    /// # Verified Properties
    ///
    /// ## Preconditions
    /// - `old(mem).addr_transl(self.vaddr) is Some`.
    /// - `self.is_valid()`.
    ///
    /// ## Postconditions
    /// - `*mem == old(mem).write(self.vaddr, x)`.
    #[verifier::external_body]
    pub fn write(self, Tracked(mem): Tracked<&mut MemView>, x: u8)
        requires
            old(mem).addr_transl(self.vaddr) is Some,
            self.is_valid(),
        ensures
            *mem == old(mem).write(self.vaddr, x),
    {
        unimplemented!()
    }

    /// Pure specification of [`Self::add`]: advances the cursor by `n` bytes
    /// while keeping the same ghost range. The `as usize` cast only matters on
    /// overflow, which [`Self::add`]'s precondition excludes.
    pub open spec fn add_spec(self, n: usize) -> Self {
        VirtPtr { vaddr: (self.vaddr + n) as usize, range: self.range }
    }

    /// Advances the pointer by `n` bytes.
    ///
    /// This operation only updates the cursor; it does not perform memory access.
    ///
    /// # Verified Properties
    ///
    /// ## Preconditions
    /// - `0 <= old(self).vaddr + n < usize::MAX`.
    ///   NOTE(review): the strict `< usize::MAX` also rejects a sum equal to
    ///   exactly `usize::MAX` — confirm that is intended (it is slightly
    ///   stricter than overflow prevention requires).
    ///
    /// ## Postconditions
    /// - `*self == old(self).add_spec(n)`.
    pub fn add(&mut self, n: usize)
        requires
    // Option 1: strict C standard compliance (pointer must stay in its range)
    // old(self).range@.start <= self.vaddr + n <= old(self).range@.end,
    // Option 2 (current choice): just make sure the addition doesn't overflow

            0 <= old(self).vaddr + n < usize::MAX,
        ensures
            *self == old(self).add_spec(n),
    // If we take option 1, we can also ensure:
    // self.is_defined()

    {
        self.vaddr = self.vaddr + n
    }

    /// Pure specification of [`Self::read_offset`]: the byte value read at
    /// `self.vaddr + n` through `mem`.
    pub open spec fn read_offset_spec(self, mem: MemView, n: usize) -> u8 {
        mem.read((self.vaddr + n) as usize).value()
    }

    /// Reads from `self.vaddr + n` without mutating `self`.
    ///
    /// Implemented by creating a temporary pointer and calling [`Self::read`].
    /// When `self.vaddr == self.range.start`, this behaves like array indexing.
    ///
    /// # Verified Properties
    ///
    /// ## Preconditions
    /// - `0 < self.vaddr + n < usize::MAX`.
    /// - `self.range@.start <= self.vaddr + n < self.range@.end`.
    /// - `mem.addr_transl((self.vaddr + n) as usize) is Some`.
    /// - Target byte at offset is initialized.
    ///
    /// ## Postconditions
    /// - Returns `self.read_offset_spec(*mem, n)`.
    pub fn read_offset(&self, Tracked(mem): Tracked<&MemView>, n: usize) -> u8
        requires
            0 < self.vaddr + n < usize::MAX,
            self.range@.start <= self.vaddr + n < self.range@.end,
            mem.addr_transl((self.vaddr + n) as usize) is Some,
            mem.memory[mem.addr_transl((self.vaddr + n) as usize).unwrap().0].contents[mem.addr_transl((self.vaddr + n) as usize).unwrap().1 as int] is Init,
        returns
            self.read_offset_spec(*mem, n),
    {
        let mut tmp = self.clone();
        tmp.add(n);
        tmp.read(Tracked(mem))
    }

    /// Pure specification of [`Self::write_offset`]: the view after writing
    /// `x` at `self.vaddr + n`.
    pub open spec fn write_offset_spec(self, mem: MemView, n: usize, x: u8) -> MemView {
        mem.write((self.vaddr + n) as usize, x)
    }

    /// Writes to `self.vaddr + n` without mutating `self`.
    ///
    /// # Verified Properties
    ///
    /// ## Preconditions
    /// - `self.inv()`.
    /// - `self.range@.start <= self.vaddr + n < self.range@.end`.
    /// - `old(mem).addr_transl((self.vaddr + n) as usize) is Some`.
    pub fn write_offset(&self, Tracked(mem): Tracked<&mut MemView>, n: usize, x: u8)
        requires
            self.inv(),
            self.range@.start <= self.vaddr + n < self.range@.end,
            old(mem).addr_transl((self.vaddr + n) as usize) is Some,
    {
        let mut tmp = self.clone();
        tmp.add(n);
        tmp.write(Tracked(mem), x)
    }

    /// Pure specification of [`Self::copy_offset`]: reads the byte at offset
    /// `n` of `src` from `mem_src` and writes it at offset `n` of `dst` into
    /// `mem_dst`, returning the updated destination view.
    pub open spec fn copy_offset_spec(src: Self, dst: Self, mem_src: MemView, mem_dst: MemView, n: usize) -> MemView {
        let x = src.read_offset_spec(mem_src, n);
        dst.write_offset_spec(mem_dst, n, x)
    }

    /// Copies one byte from `src + n` to `dst + n`.
    ///
    /// Source and destination are reasoned about via distinct memory views.
    ///
    /// # Verified Properties
    ///
    /// ## Preconditions
    /// - `src.inv()` and `dst.inv()`.
    /// - Source offset and destination offset are both in-range.
    /// - Source offset is mapped and initialized in `mem_src`.
    /// - Destination offset is mapped in `old(mem_dst)`.
    ///
    /// ## Postconditions
    /// - `*mem_dst == Self::copy_offset_spec(*src, *dst, *mem_src, *old(mem_dst), n)`.
    /// - `mem_dst.mappings == old(mem_dst).mappings`.
    /// - `mem_dst.memory.dom() == old(mem_dst).memory.dom()`.
    pub fn copy_offset(src: &Self, dst: &Self, Tracked(mem_src): Tracked<&MemView>, Tracked(mem_dst): Tracked<&mut MemView>, n: usize)
        requires
            src.inv(),
            dst.inv(),
            src.range@.start <= src.vaddr + n < src.range@.end,
            mem_src.addr_transl((src.vaddr + n) as usize) is Some,
            mem_src.memory.contains_key(mem_src.addr_transl((src.vaddr + n) as usize).unwrap().0),
            mem_src.memory[mem_src.addr_transl((src.vaddr + n) as usize).unwrap().0].contents[mem_src.addr_transl((src.vaddr + n) as usize).unwrap().1 as int] is Init,

            dst.range@.start <= dst.vaddr + n < dst.range@.end,
            old(mem_dst).addr_transl((dst.vaddr + n) as usize) is Some,
        ensures
            *mem_dst == Self::copy_offset_spec(*src, *dst, *mem_src, *old(mem_dst), n),
            mem_dst.mappings == old(mem_dst).mappings,
            mem_dst.memory.dom() == old(mem_dst).memory.dom(),

    {
        let x = src.read_offset(Tracked(mem_src), n);
        // NOTE(review): the postconditions are currently *admitted*, not
        // proved — `admit()` is an unchecked axiom and a soundness hole in
        // this function's verification. TODO: discharge the relation between
        // `read_offset`/`write_offset` and `copy_offset_spec` and remove it.
        proof { admit() }
        ;
        dst.write_offset(Tracked(mem_dst), n, x)
    }

    /// Pure specification of [`Self::copy_nonoverlapping`]: the destination
    /// view after copying bytes at offsets `n-1, n-2, ..., 0` (defined
    /// recursively from the highest offset down).
    pub open spec fn memcpy_spec(src: Self, dst: Self, mem_src: MemView, mem_dst: MemView, n: usize) -> MemView
        decreases n,
    {
        if n == 0 {
            mem_dst
        } else {
            let mem_dst_1 = Self::copy_offset_spec(src, dst, mem_src, mem_dst, (n - 1) as usize);

            Self::memcpy_spec(src, dst, mem_src, mem_dst_1, (n - 1) as usize)
        }
    }

    /// Copies `n` bytes from `src` to `dst` in the given memory view.
    ///
    /// The source and destination must *not* overlap.
    /// `copy_nonoverlapping` is semantically equivalent to C’s `memcpy`,
    /// but with the source and destination arguments swapped.
    ///
    /// `mem` points to `src`'s owned memory regions.
    ///
    /// # Verified Properties
    ///
    /// ## Preconditions
    /// - `src.inv()` and `dst.inv()`.
    /// - Source and destination ranges are non-overlapping.
    /// - Source slice `[src.vaddr, src.vaddr + n)` is in-range and initialized in `mem_src`.
    /// - Destination slice `[dst.vaddr, dst.vaddr + n)` is in-range and mapped in `old(mem_dst)`.
    ///
    /// ## Postconditions
    /// - `*mem_dst == Self::memcpy_spec(*src, *dst, *mem_src, *old(mem_dst), n)`.
    /// - `mem_dst.mappings == old(mem_dst).mappings`.
    /// - `mem_dst.memory.dom() == old(mem_dst).memory.dom()`.
    /// - Destination slice remains mapped in `mem_dst`.
    pub fn copy_nonoverlapping(
        src: &Self,
        dst: &Self,
        Tracked(mem_src): Tracked<&MemView>,
        Tracked(mem_dst): Tracked<&mut MemView>,
        n: usize,
    )
        requires
            src.inv(),
            dst.inv(),
            src.range@.end <= dst.range@.start || dst.range@.end <= src.range@.start,
            src.range@.start <= src.vaddr,
            src.vaddr + n <= src.range@.end,
            forall|i: usize|
                #![trigger mem_src.addr_transl(i)]
                src.vaddr <= i < src.vaddr + n ==> {
                    &&& mem_src.addr_transl(i) is Some
                    &&& mem_src.memory.contains_key(mem_src.addr_transl(i).unwrap().0)
                    &&& mem_src.memory[mem_src.addr_transl(i).unwrap().0].contents[mem_src.addr_transl(i).unwrap().1 as int] is Init
                },
            dst.range@.start <= dst.vaddr,
            dst.vaddr + n <= dst.range@.end,
            forall|i: usize|
                dst.vaddr <= i < dst.vaddr + n ==> {
                    &&& old(mem_dst).addr_transl(i) is Some
                },
        ensures
            *mem_dst == Self::memcpy_spec(*src, *dst, *mem_src, *old(mem_dst), n),
            mem_dst.mappings == old(mem_dst).mappings,
            mem_dst.memory.dom() == old(mem_dst).memory.dom(),
            forall|i: usize|
                #![trigger mem_dst.addr_transl(i)]
                dst.vaddr <= i < dst.vaddr + n ==> {
                    &&& mem_dst.addr_transl(i) is Some
            },
        decreases n,
    {
        if n == 0 {
            return ;
        } else {
            let ghost mem0 = *mem_dst;

            // Copy the highest offset first, matching the order of `memcpy_spec`.
            Self::copy_offset(src, dst, Tracked(mem_src), Tracked(mem_dst), n - 1);

            proof {
                // `copy_offset` preserves `mappings`, and `addr_transl` only
                // depends on `mappings`, so the remaining destination bytes
                // are still mapped — this re-establishes the precondition for
                // the recursive call.
                assert(forall|i: usize|
                    dst.vaddr <= i < dst.vaddr + n - 1 ==>
                    mem_dst.addr_transl(i) == mem0.addr_transl(i)
                );
            }

            Self::copy_nonoverlapping(src, dst, Tracked(mem_src), Tracked(mem_dst), n - 1);
        }
    }

    /// Builds a valid [`VirtPtr`] from a concrete virtual-address range.
    ///
    /// # Verified Properties
    ///
    /// ## Preconditions
    /// - `vaddr != 0`.
    /// - `0 < len <= usize::MAX - vaddr`.
    ///
    /// ## Postconditions
    /// - `r.is_valid()`.
    /// - `r.range@.start == vaddr`.
    /// - `r.range@.end == (vaddr + len) as usize`.
    pub fn from_vaddr(vaddr: usize, len: usize) -> (r: Self)
        requires
            vaddr != 0,
            0 < len <= usize::MAX - vaddr,
        ensures
            r.is_valid(),
            r.range@.start == vaddr,
            r.range@.end == (vaddr + len) as usize,
    {
        Self { vaddr, range: Ghost(Range { start: vaddr, end: (vaddr + len) as usize }) }
    }

    /// Executable helper to split the [`VirtPtr`] into two contiguous ranges.
    ///
    /// This updates ghost ranges to match a split operation over the same region.
    ///
    /// # Verified Properties
    ///
    /// ## Preconditions
    /// - `self.is_valid()`.
    /// - `0 <= n <= self.range@.end - self.range@.start`.
    /// - `self.vaddr == self.range@.start`.
    ///
    /// ## Postconditions
    /// - Left pointer covers `[self.range@.start, self.range@.start + n)`.
    /// - Right pointer covers `[self.range@.start + n, self.range@.end)`.
    #[verus_spec(r =>
        requires
            self.is_valid(),
            0 <= n <= self.range@.end - self.range@.start,
            self.vaddr == self.range@.start,
        ensures
            r.0.range@.start == self.range@.start,
            r.0.range@.end == self.range@.start + n,
            r.0.vaddr == self.range@.start,
            r.1.range@.start == self.range@.start + n,
            r.1.range@.end == self.range@.end,
            r.1.vaddr == self.range@.start + n,
    )]
    pub fn split(self, n: usize) -> (Self, Self) {
        let left = VirtPtr {
            vaddr: self.vaddr,
            range: Ghost(Range { start: self.vaddr, end: (self.vaddr + n) as usize }),
        };

        let right = VirtPtr {
            vaddr: self.vaddr + n,
            range: Ghost(Range { start: (self.vaddr + n) as usize, end: self.range@.end }),
        };

        (left, right)
    }
}
772
/// A [`GlobalMemView`] is a more abstract view of memory that elides most of the details. The API specifications
/// of higher-level objects like [`VmSpaceOwner`](crate::specs::mm::vm_space::VmSpaceOwner) use this view.
pub tracked struct GlobalMemView {
    /// Page-table mapping set. Not consulted by [`Self::addr_transl`], which
    /// uses `tlb_mappings`; presumably mirrors the installed PT entries —
    /// TODO confirm against the page-table specs.
    pub pt_mappings: Set<Mapping>,
    /// Mappings visible through the TLB; address translation and the
    /// mapped/unmapped accounting below are defined over this set.
    pub tlb_mappings: Set<Mapping>,
    /// Physical addresses not covered by any TLB mapping
    /// (see [`Self::unmapped_correct`]).
    pub unmapped_pas: Set<Paddr>,
    /// Physical frame contents keyed by frame base address.
    pub memory: Map<Paddr, FrameContents>,
}
781
impl Inv for GlobalMemView {

    /// Well-formedness invariant for [`GlobalMemView`].
    ///
    /// # Invariant
    /// - Every TLB mapping satisfies its own invariant; every PA it covers is
    ///   tracked in `memory`; the frame at its base exists, has matching size,
    ///   and satisfies the [`FrameContents`] invariant.
    /// - TLB mappings are pairwise VA-disjoint.
    /// - The mapping sets and the memory domain are finite.
    /// - All PAs below [`MAX_PADDR`] are accounted for, PAs are uniquely
    ///   mapped, and `unmapped_pas` is exactly the complement of the mapped
    ///   PAs (see the three named predicates below).
    ///
    /// NOTE(review): [`Self::all_pas_accounted_for`] is logically implied by
    /// [`Self::unmapped_correct`]; presumably kept as a separate conjunct for
    /// trigger/automation reasons — confirm before simplifying.
    open spec fn inv(self) -> bool {
        &&& forall |m: Mapping| #![auto] self.tlb_mappings.contains(m) ==> {
            &&& m.inv()
            &&& forall|pa: Paddr| m.pa_range.start <= pa < m.pa_range.end ==> {
                &&& self.memory.dom().contains(pa)
            }
            &&& self.memory.contains_key(m.pa_range.start)
            &&& self.memory[m.pa_range.start].size == m.page_size
            &&& self.memory[m.pa_range.start].inv()
        }
        &&& forall |m: Mapping|
            forall |n: Mapping| #![auto]
            self.tlb_mappings.contains(m) ==>
            self.tlb_mappings.contains(n) ==>
            m != n ==>
            #[trigger]
            m.va_range.end <= n.va_range.start || n.va_range.end <= m.va_range.start
        &&& self.tlb_mappings.finite()
        &&& self.pt_mappings.finite()
        &&& self.memory.dom().finite()
        &&& self.all_pas_accounted_for()
        &&& self.pas_uniquely_mapped()
        &&& self.unmapped_correct()
    }
}
809
810impl GlobalMemView {
811
812    pub open spec fn addr_transl(self, va: usize) -> Option<(usize, usize)> {
813        let mappings = self.tlb_mappings.filter(|m: Mapping| m.va_range.start <= va < m.va_range.end);
814        if 0 < mappings.len() {
815            let m = mappings.choose();  // In a well-formed TLB there will only be one, but if malformed this is non-deterministic!
816            let off = va - m.va_range.start;
817            Some((m.pa_range.start, off as usize))
818        } else {
819            None
820        }
821    }
822
823    pub open spec fn is_mapped(self, pa: usize) -> bool {
824        exists|m: Mapping| self.tlb_mappings.contains(m) && m.pa_range.start <= pa < m.pa_range.end
825    }
826
827    pub open spec fn all_pas_accounted_for(self) -> bool {
828        forall|pa: Paddr|
829            0 <= pa < MAX_PADDR ==>
830            #[trigger] self.is_mapped(pa) || #[trigger] self.unmapped_pas.contains(pa)
831    }
832
833    pub open spec fn pas_uniquely_mapped(self) -> bool {
834        forall|m1: Mapping, m2: Mapping|
835             #[trigger] self.tlb_mappings.contains(m1) && #[trigger] self.tlb_mappings.contains(m2) && m1 != m2 ==>
836            m1.pa_range.end <= m2.pa_range.start || m2.pa_range.end <= m1.pa_range.start
837    }
838
839    pub open spec fn unmapped_correct(self) -> bool {
840        forall|pa: Paddr|
841            #![auto]
842            self.is_mapped(pa) <==> !self.unmapped_pas.contains(pa)
843    }
844
845    pub open spec fn take_view_spec(self, vaddr: usize, len: usize) -> (Self, MemView) {
846        let range_end = vaddr + len;
847
848        let leave_mappings: Set<Mapping> = self.tlb_mappings.filter(
849            |m: Mapping| m.va_range.end <= vaddr || m.va_range.start > range_end
850        );
851
852        let take_mappings = self.tlb_mappings.filter(
853            |m: Mapping| m.va_range.start < range_end && m.va_range.end > vaddr
854        );
855
856        let leave_pas = Set::new(
857            |pa: usize|
858                exists|m: Mapping| leave_mappings.contains(m) && m.pa_range.start <= pa < m.pa_range.end
859        );
860        let take_pas = Set::new(
861            |pa: usize|
862                exists|m: Mapping| take_mappings.contains(m) && m.pa_range.start <= pa < m.pa_range.end
863        );
864
865        (
866            GlobalMemView {
867                tlb_mappings: leave_mappings,
868                memory: self.memory.restrict(leave_pas),
869                ..self
870            },
871            MemView { mappings: take_mappings, memory: self.memory.restrict(take_pas) },
872        )
873    }
874
875    pub axiom fn take_view(tracked &mut self, vaddr: usize, len: usize) -> (tracked view: MemView)
876        ensures
877            *self == old(self).take_view_spec(vaddr, len).0,
878            view == old(self).take_view_spec(vaddr, len).1;
879
880    pub open spec fn return_view_spec(self, view: MemView) -> Self {
881        GlobalMemView {
882            tlb_mappings: self.tlb_mappings.union(view.mappings),
883            memory: self.memory.union_prefer_right(view.memory),
884            ..self
885        }
886    }
887
888    pub axiom fn return_view(&mut self, view: MemView)
889        ensures
890            *self == old(self).return_view_spec(view);
891
892    pub open spec fn tlb_flush_vaddr_spec(self, vaddr: Vaddr) -> Self {
893        let tlb_mappings = self.tlb_mappings.filter(
894            |m: Mapping| m.va_range.end <= vaddr || vaddr < m.va_range.start
895        );
896        GlobalMemView {
897            tlb_mappings,
898            ..self
899        }
900    }
901
902    pub axiom fn tlb_flush_vaddr(&mut self, vaddr: Vaddr)
903        requires
904            old(self).inv()
905        ensures
906            *self == old(self).tlb_flush_vaddr_spec(vaddr),
907            self.inv();
908
909    pub open spec fn tlb_soft_fault_spec(self, vaddr: Vaddr) -> Self {
910        let mapping = self.pt_mappings.filter(|m: Mapping| m.va_range.start <= vaddr < m.va_range.end).choose();
911        GlobalMemView {
912            tlb_mappings: self.tlb_mappings.insert(mapping),
913            ..self
914        }
915    }
916
917    pub axiom fn tlb_soft_fault(tracked &mut self, vaddr: Vaddr)
918        requires
919            old(self).inv(),
920            old(self).addr_transl(vaddr) is None,
921        ensures
922            *self == old(self).tlb_soft_fault_spec(vaddr),
923            self.inv();
924
925    pub open spec fn pt_map_spec(self, m: Mapping) -> Self {
926        let pt_mappings = self.pt_mappings.insert(m);
927        let unmapped_pas = self.unmapped_pas.difference(
928            Set::new(|pa: usize| m.pa_range.start <= pa < m.pa_range.end)
929        );
930        GlobalMemView {
931            pt_mappings,
932            unmapped_pas,
933            ..self
934        }
935    }
936
937    pub axiom fn pt_map(&mut self, m: Mapping)
938        requires
939            forall|pa: Paddr|
940                m.pa_range.start <= pa < m.pa_range.end ==>
941                old(self).unmapped_pas.contains(pa),
942            old(self).inv()
943        ensures
944            *self == old(self).pt_map_spec(m);
945
946    pub open spec fn pt_unmap_spec(self, m: Mapping) -> Self {
947        let pt_mappings = self.pt_mappings.remove(m);
948        let unmapped_pas = self.unmapped_pas.union(
949            Set::new(|pa: usize| m.pa_range.start <= pa < m.pa_range.end)
950        );
951        GlobalMemView {
952            pt_mappings,
953            unmapped_pas,
954            ..self
955        }
956    }
957
958    pub axiom fn pt_unmap(&mut self, m: Mapping)
959        requires
960            old(self).pt_mappings.contains(m),
961            old(self).inv()
962        ensures
963            *self == old(self).pt_unmap_spec(m),
964            self.inv();
965
966    pub proof fn lemma_va_mapping_unique(self, va: usize)
967        requires
968            self.inv(),
969        ensures
970            self.tlb_mappings.filter(|m: Mapping| m.va_range.start <= va < m.va_range.end).is_singleton(),
971    {
972        admit()
973    }
974}
975
976} // verus!