// ostd/specs/mm/virt_mem_newer.rs

//! Virtual-memory specification model used by [`VmSpace`] proofs.
//!
//! This module defines:
//! - [`VirtPtr`], a specification-level virtual pointer over a bounded virtual range;
//! - [`FrameContents`], a physical-frame content model with alignment and range invariants;
//! - [`MemView`], a projection of virtual-to-physical mappings and frame contents that supports
//!   translation, read/write reasoning, borrowing, splitting, and joining.
//!
//! The model is intentionally verification-oriented (Verus specs/proofs) and is used by
//! [`crate::mm::vm_space`] to state and prove reader/writer permission isolation as well
//! as correctness of the low-level virtual-memory operations those proofs rely on.
//!
//! [`VmSpace`]: crate::mm::vm_space::VmSpace

use core::marker::PhantomData;
use core::ops::Range;

use vstd::layout;
use vstd::pervasive::arbitrary;
use vstd::prelude::*;
use vstd::raw_ptr;
use vstd::set;
use vstd::set_lib;

use crate::mm::{Paddr, Vaddr};
use crate::prelude::Inv;
use crate::specs::arch::mm::MAX_PADDR;
use crate::specs::mm::page_table::Mapping;

#[path = "virt_mem_example.rs"]
mod virt_mem_example;

verus! {

/// Specification-level virtual pointer with an associated half-open range.
///
/// [`VirtPtr`] models a C-like pointer used by verification code:
/// - `vaddr` is the current cursor position;
/// - `range` is the allocation-like bounds `[start, end)`.
///
/// Most operations require `is_valid()` (i.e., the current pointer is within
/// range and not one-past-end), while offset operations additionally require
/// the computed address to stay inside the same range.
pub struct VirtPtr {
    /// Current virtual address represented by this pointer value.
    pub vaddr: Vaddr,
    /// Logical bounds of the pointer's valid object/range, `[start, end)`.
    /// Ghost-only: erased at runtime, so the executable value is just `vaddr`.
    pub range: Ghost<Range<Vaddr>>,
}

/// Byte contents of one physical frame range tracked in a [`MemView`].
///
/// This is the physical-memory side of the model: each frame base `Paddr` maps
/// to a [`FrameContents`] value whose metadata (`size`, `range`) constrains how
/// `contents` can be interpreted.
pub ghost struct FrameContents {
    /// Per-byte initialization/value state for this frame; indexed by the
    /// byte offset from `range.start`.
    pub contents: Seq<raw_ptr::MemContents<u8>>,
    /// Frame size in bytes; equals `contents.len()` under [`Inv::inv`].
    pub size: usize,
    /// Physical range covered by this frame, modeled as `[start, end)`.
    pub range: Range<Paddr>,
}

impl Inv for FrameContents {
    /// Well-formedness invariant for [`FrameContents`].
    ///
    /// # Invariant
    /// - `contents.len() == size`.
    /// - `size == range.end - range.start`.
    /// - `range.start` and `range.end` are aligned to `size`.
    /// - `range` is ordered and remains below [`MAX_PADDR`].
    ///
    /// NOTE(review): when `size == 0` the `%` conjuncts fall back to Verus's
    /// unspecified mod-by-zero semantics — confirm whether zero-size frames
    /// are intended to satisfy `inv`. Also, `range.end < MAX_PADDR` is strict,
    /// so a frame ending exactly at `MAX_PADDR` is excluded — verify intended.
    open spec fn inv(self) -> bool {
        &&& self.contents.len() == self.size@
        &&& self.size@ == self.range.end - self.range.start
        &&& self.range.start % self.size == 0
        &&& self.range.end % self.size == 0
        &&& self.range.start <= self.range.end < MAX_PADDR
    }
}


/// A local virtual-memory view used in proofs.
///
/// A [`MemView`] pairs:
/// - [`Self::mappings`]: a set of [`Mapping`] entries used by [`Self::addr_transl`];
/// - [`Self::memory`]: frame bytes keyed by physical frame base (`Paddr`) via [`FrameContents`].
///
/// In practice this view is obtained from [`GlobalMemView`] using
/// [`GlobalMemView::take_view`], then consumed by APIs such as [`VirtPtr::read`],
/// [`VirtPtr::write`], and higher-level ownership proofs in
/// [`VmSpaceOwner`].
///
/// [`VmSpaceOwner`]: crate::mm::vm_space::vm_space_specs::VmSpaceOwner
pub tracked struct MemView {
    /// Virtual-to-physical mapping set used for address translation.
    pub mappings: Set<Mapping>,
    /// Physical frame contents for mapped pages referenced by [`Self::mappings`],
    /// keyed by frame base address.
    pub memory: Map<Paddr, FrameContents>
}

impl MemView {
    /// Translates a virtual address to `(frame_base_pa, frame_offset)`.
    ///
    /// Returns [`Option::None`] if no mapping in [`Self::mappings`] covers `va`.
    pub open spec fn addr_transl(self, va: usize) -> Option<(usize, usize)> {
        let mappings = self.mappings.filter(|m: Mapping| m.va_range.start <= va < m.va_range.end);
        if 0 < mappings.len() {
            let m = mappings.choose();  // In a well-formed PT there will only be one, but if malformed this is non-deterministic!
            let off = va - m.va_range.start;
            Some((m.pa_range.start, off as usize))
        } else {
            None
        }
    }

    /// Specification read through virtual translation.
    ///
    /// Equivalent to resolving via [`Self::addr_transl`] and reading from [`Self::memory`].
    ///
    /// NOTE(review): if `addr_transl(va)` is `None`, `->Some_0` leaves
    /// `(pa, off)` unconstrained, so the result is arbitrary; callers are
    /// expected to require `addr_transl(va) is Some`.
    pub open spec fn read(self, va: usize) -> raw_ptr::MemContents<u8> {
        let (pa, off) = self.addr_transl(va)->Some_0;
        self.memory[pa].contents[off as int]
    }

    /// Specification write through virtual translation.
    ///
    /// Returns a new [`MemView`] with one byte updated, preserving all mappings.
    /// Only `contents` of the target frame changes; its `size`/`range` metadata
    /// and every other frame are carried over unchanged.
    pub open spec fn write(self, va: usize, x: u8) -> Self {
        let (pa, off) = self.addr_transl(va)->Some_0;
        MemView {
            memory: self.memory.insert(pa, FrameContents {
                contents: self.memory[pa].contents.update(off as int, raw_ptr::MemContents::Init(x)),
                ..self.memory[pa]
            }),
            ..self
        }
    }

    /// Whether two virtual addresses denote equal byte contents in this view.
    /// Meaningful only when both addresses translate (see note on [`Self::read`]).
    pub open spec fn eq_at(self, va1: usize, va2: usize) -> bool {
        let (pa1, off1) = self.addr_transl(va1)->Some_0;
        let (pa2, off2) = self.addr_transl(va2)->Some_0;
        self.memory[pa1].contents[off1 as int] == self.memory[pa2].contents[off2 as int]
    }

    /// Whether `va` is translated and mapped to frame base `pa`.
    pub open spec fn is_mapped(self, va: usize, pa: usize) -> bool {
        self.addr_transl(va) is Some && self.addr_transl(va)->Some_0.0 == pa
    }

    /// Specification for borrowing a sub-view covering `[vaddr, vaddr + len)`.
    ///
    /// The result keeps overlapping mappings and restricts memory to physical
    /// frames reachable from that virtual range. Note that a mapping merely
    /// overlapping the range is kept whole, not truncated to it.
    pub open spec fn borrow_at_spec(&self, vaddr: usize, len: usize) -> MemView {
        let range_end = vaddr + len;

        let valid_pas = Set::new(
            |pa: usize|
                exists |va: usize|
                    vaddr <= va < range_end && #[trigger] self.is_mapped(va, pa),
        );

        MemView {
            mappings: self.mappings.filter(
                |m: Mapping| m.va_range.start < range_end && m.va_range.end > vaddr,
            ),
            memory: self.memory.restrict(valid_pas),
        }
    }

    /// Whether mappings in this view are pairwise VA-disjoint.
    pub open spec fn mappings_are_disjoint(self) -> bool {
        forall|m1: Mapping, m2: Mapping|
            #![trigger self.mappings.contains(m1), self.mappings.contains(m2)]
            self.mappings.contains(m1) && self.mappings.contains(m2) && m1 != m2 ==> {
                m1.va_range.end <= m2.va_range.start || m2.va_range.end <= m1.va_range.start
            }
    }

    /// Specification for splitting this view at `split_end = vaddr + len`.
    ///
    /// Returns `(left, right)` where:
    /// - `left` covers `[vaddr, split_end)`, i.e. all mappings overlapping that range.
    /// - `right` covers addresses `>= split_end`.
    ///
    /// NOTE(review): a mapping straddling `split_end` satisfies both filters
    /// and appears in *both* halves; since [`Self::join`] requires disjoint
    /// mapping sets, such splits cannot be re-joined — confirm callers only
    /// split on mapping boundaries.
    pub open spec fn split_spec(self, vaddr: usize, len: usize) -> (MemView, MemView) {
        let split_end = vaddr + len;

        // The left part.
        let left_mappings = self.mappings.filter(
            |m: Mapping| m.va_range.start < split_end && m.va_range.end > vaddr,
        );
        let right_mappings = self.mappings.filter(|m: Mapping| m.va_range.end > split_end);

        let left_pas = Set::new(
            |pa: usize|
                exists|va: usize| vaddr <= va < split_end && self.is_mapped(va, pa),
        );
        let right_pas = Set::new(
            |pa: usize| exists|va: usize| va >= split_end && self.is_mapped(va, pa),
        );

        (
            MemView { mappings: left_mappings, memory: self.memory.restrict(left_pas) },
            MemView { mappings: right_mappings, memory: self.memory.restrict(right_pas) },
        )
    }

    /// Tracked wrapper of [`Self::borrow_at_spec`].
    ///
    /// # Verified Properties
    ///
    /// ## Postconditions
    /// - `r == self.borrow_at_spec(vaddr, len)`.
    #[verifier::external_body]
    pub proof fn borrow_at(tracked &self, vaddr: usize, len: usize) -> (tracked r: &MemView)
        ensures
            r == self.borrow_at_spec(vaddr, len),
    {
        unimplemented!()
    }


    /// Tracked wrapper of [`Self::split_spec`].
    ///
    /// # Verified Properties
    ///
    /// ## Postconditions
    /// - `r == self.split_spec(vaddr, len)`.
    #[verifier::external_body]
    pub proof fn split(tracked self, vaddr: usize, len: usize) -> (tracked r: (Self, Self))
        ensures
            r == self.split_spec(vaddr, len),
    {
        unimplemented!()
    }

    /// Lemma: [`Self::split_spec`] preserves translation semantics on each side.
    ///
    /// # Verified Properties
    ///
    /// ## Preconditions
    /// - `original.split_spec(vaddr, len) == (left, right)`.
    ///
    /// ## Postconditions
    /// - `right.memory.dom().subset_of(original.memory.dom())`.
    /// - For any `va` in `[vaddr, vaddr + len)`,
    ///   `original.addr_transl(va) == left.addr_transl(va)`.
    /// - For any `va >= vaddr + len`,
    ///   `original.addr_transl(va) == right.addr_transl(va)`.
    pub proof fn lemma_split_preserves_transl(
        original: MemView,
        vaddr: usize,
        len: usize,
        left: MemView,
        right: MemView,
    )
        requires
            original.split_spec(vaddr, len) == (left, right),
        ensures
            right.memory.dom().subset_of(original.memory.dom()),
            forall|va: usize|
                vaddr <= va < vaddr + len ==> {
                    #[trigger] original.addr_transl(va) == left.addr_transl(va)
                },
            forall|va: usize|
                va >= vaddr + len ==> {
                    #[trigger] original.addr_transl(va) == right.addr_transl(va)
                },
    {
        assert(right.memory.dom().subset_of(original.memory.dom()));

        // Left side: the mappings relevant to any `va` in the split range are
        // exactly the same before and after filtering, so `choose()` draws
        // from the same candidate set.
        assert forall|va: usize| vaddr <= va < vaddr + len implies original.addr_transl(va)
            == left.addr_transl(va) by {
            assert(left.mappings =~= original.mappings.filter(
                |m: Mapping| m.va_range.start < vaddr + len && m.va_range.end > vaddr,
            ));
            let o_mappings = original.mappings.filter(
                |m: Mapping| m.va_range.start <= va < m.va_range.end,
            );
            let l_mappings = left.mappings.filter(
                |m: Mapping| m.va_range.start <= va < m.va_range.end,
            );

            assert(l_mappings <= o_mappings);
            // Mutual inclusion: any original mapping covering `va` survives
            // the left filter because `va` lies inside `[vaddr, vaddr + len)`.
            assert forall|m: Mapping| #[trigger]
                o_mappings.contains(m) implies l_mappings.contains(m) by {
                assert(left.mappings.contains(m));
            };
            assert(o_mappings <= l_mappings);
            assert(o_mappings == l_mappings);
        }

        // Right side: symmetric argument for addresses at or beyond the split.
        assert forall|va: usize| va >= vaddr + len implies original.addr_transl(va)
            == right.addr_transl(va) by {
            let split_end = vaddr + len;

            let o_mappings = original.mappings.filter(
                |m: Mapping| m.va_range.start <= va < m.va_range.end,
            );
            let r_mappings = right.mappings.filter(
                |m: Mapping| m.va_range.start <= va < m.va_range.end,
            );

            assert forall|m: Mapping| o_mappings.contains(m) implies r_mappings.contains(m) by {
                assert(right.mappings.contains(m));
                assert(r_mappings.contains(m));
            }

            assert(o_mappings == r_mappings);
        }
    }

    /// Specification for merging two views.
    ///
    /// Mappings are unioned, and memory conflicts are resolved by
    /// [`vstd::map::Map::union_prefer_right`] with `other` taking precedence.
    pub open spec fn join_spec(self, other: MemView) -> MemView {
        MemView {
            mappings: self.mappings.union(other.mappings),
            memory: self.memory.union_prefer_right(other.memory),
        }
    }

    /// Tracked wrapper of [`Self::join_spec`].
    ///
    /// # Verified Properties
    ///
    /// ## Preconditions
    /// - `old(self).mappings.disjoint(other.mappings)`.
    ///
    /// ## Postconditions
    /// - `*self == old(self).join_spec(other)`.
    #[verifier::external_body]
    pub proof fn join(tracked &mut self, tracked other: Self)
        requires
            old(self).mappings.disjoint(other.mappings),
        ensures
            *final(self) == old(self).join_spec(other),
    {
        unimplemented!()
    }

    /// Lemma: splitting and then joining reconstructs the original view.
    ///
    /// # Verified Properties
    ///
    /// ## Preconditions
    /// - `this.split_spec(vaddr, len) == (lhs, rhs)`.
    /// - Every mapping in `this` starts at or after `vaddr`.
    /// - Every physical frame tracked by `this.memory` is reachable from some
    ///   virtual address at or after `vaddr`.
    ///
    /// ## Postconditions
    /// - `lhs.join_spec(rhs)` has the same mappings and memory contents as `this`.
    ///
    /// NOTE(review): the proof body is empty, i.e. the obligations are
    /// expected to be discharged automatically by the solver — confirm this
    /// still verifies and is not a stub awaiting a proof.
    pub proof fn lemma_split_join_identity(
        this: MemView,
        lhs: MemView,
        rhs: MemView,
        vaddr: usize,
        len: usize,
    )
        requires
            this.split_spec(vaddr, len) == (lhs, rhs),
            forall|m: Mapping|
                #[trigger] this.mappings.contains(m) ==> vaddr <= m.va_range.start < m.va_range.end,
            forall|pa: Paddr|
                #[trigger] this.memory.contains_key(pa) ==> exists |va: usize|
                    vaddr <= va && #[trigger] this.is_mapped(va, pa),
        ensures
            this.mappings == lhs.join_spec(rhs).mappings,
            this.memory == lhs.join_spec(rhs).memory,
    {
    }
}

impl Inv for VirtPtr {
    /// Well-formedness invariant: the cursor lies within the ordered range.
    ///
    /// NOTE(review): the second conjunct is implied by the first
    /// (`start <= vaddr <= end` already gives `start <= end`), so it is
    /// redundant but harmless.
    open spec fn inv(self) -> bool {
        &&& self.range@.start <= self.vaddr <= self.range@.end
        &&& self.range@.end >= self.range@.start
    }
}

impl Clone for VirtPtr {
    /// Clones the pointer; the result is specified to equal `self` exactly
    /// (same cursor and same ghost range).
    fn clone(&self) -> (res: Self)
        ensures
            res == self,
    {
        Self { vaddr: self.vaddr, range: Ghost(self.range@) }
    }
}

/// [`VirtPtr`] is plain data (a `usize` cursor plus a ghost range), so it is
/// trivially `Copy`.
impl Copy for VirtPtr {

}

impl VirtPtr {
    /// Creates a pointer at `vaddr` with logical range `[vaddr, vaddr + len)`.
    #[vstd::contrib::auto_spec]
    pub fn new(vaddr: Vaddr, len: usize) -> Self
    {
        Self { vaddr, range: Ghost(Range { start: vaddr, end: (vaddr + len) as usize }) }
    }

    /// Whether the pointer has a non-null address and consistent bounds.
    pub open spec fn is_defined(self) -> bool {
        &&& self.vaddr != 0
        &&& self.range@.start <= self.vaddr <= self.range@.end
    }

    /// Whether dereferencing the current pointer position is allowed.
    ///
    /// This excludes the one-past-end position (`vaddr == range.end`).
    pub open spec fn is_valid(self) -> bool {
        &&& self.is_defined()
        &&& self.vaddr < self.range@.end
    }

    /// Reads one byte from `self.vaddr` in the provided memory view.
    ///
    /// # Verified Properties
    ///
    /// ## Preconditions
    /// - `mem.addr_transl(self.vaddr) is Some`.
    /// - Target byte is initialized:
    ///   `mem.memory[mem.addr_transl(self.vaddr).unwrap().0].contents[mem.addr_transl(self.vaddr).unwrap().1 as int] is Init`.
    /// - `self.is_valid()`.
    ///
    /// ## Postconditions
    /// - Returns `mem.read(self.vaddr).value()`.
    #[verifier::external_body]
    pub fn read(self, Tracked(mem): Tracked<&MemView>) -> u8
        requires
            mem.addr_transl(self.vaddr) is Some,
            mem.memory[mem.addr_transl(self.vaddr).unwrap().0].contents[mem.addr_transl(self.vaddr).unwrap().1 as int] is Init,
            self.is_valid(),
        returns
            mem.read(self.vaddr).value(),
    {
        unimplemented!()
    }

    /// Writes one byte to `self.vaddr` in the provided memory view.
    ///
    /// # Verified Properties
    ///
    /// ## Preconditions
    /// - `old(mem).addr_transl(self.vaddr) is Some`.
    /// - `self.is_valid()`.
    ///
    /// ## Postconditions
    /// - `*mem == old(mem).write(self.vaddr, x)`.
    #[verifier::external_body]
    pub fn write(self, Tracked(mem): Tracked<&mut MemView>, x: u8)
        requires
            old(mem).addr_transl(self.vaddr) is Some,
            self.is_valid(),
        ensures
            *final(mem) == old(mem).write(self.vaddr, x),
    {
        unimplemented!()
    }

    /// Spec-level result of [`Self::add`]: the cursor advanced by `n` bytes,
    /// with the logical range left unchanged.
    pub open spec fn add_spec(self, n: usize) -> Self {
        VirtPtr { vaddr: (self.vaddr + n) as usize, range: self.range }
    }

    /// Advances the pointer by `n` bytes.
    ///
    /// This operation only updates the cursor; it does not perform memory access.
    ///
    /// # Verified Properties
    ///
    /// ## Preconditions
    /// - `0 <= old(self).vaddr + n < usize::MAX`.
    ///
    /// ## Postconditions
    /// - `*self == old(self).add_spec(n)`.
    pub fn add(&mut self, n: usize)
        requires
    // Option 1: strict C standard compliance
    // old(self).range@.start <= self.vaddr + n <= old(self).range@.end,
    // Option 2: just make sure it doesn't overflow

            0 <= old(self).vaddr + n < usize::MAX,
        ensures
            *final(self) == old(self).add_spec(n),
    // If we take option 1, we can also ensure:
    // self.is_defined()

    {
        self.vaddr = self.vaddr + n
    }

    /// Spec-level result of [`Self::wrapping_add`]: the cursor advanced with
    /// wrapping arithmetic, range unchanged.
    pub open spec fn wrapping_add_spec(self, n: usize) -> Self {
        VirtPtr { vaddr: self.vaddr.wrapping_add(n), range: self.range }
    }

    /// Returns a new pointer whose address is `self.vaddr` advanced by `n`
    /// bytes using wrapping arithmetic.
    pub fn wrapping_add(self, n: usize) -> (res: Self)
        returns self.wrapping_add_spec(n),
    {
        VirtPtr { vaddr: self.vaddr.wrapping_add(n), range: self.range }
    }

    /// Spec-level value returned by [`Self::read_offset`]: the byte at
    /// `self.vaddr + n` in `mem`.
    pub open spec fn read_offset_spec(self, mem: MemView, n: usize) -> u8 {
        mem.read((self.vaddr + n) as usize).value()
    }

    /// Reads from `self.vaddr + n` without mutating `self`.
    ///
    /// Implemented by creating a temporary pointer and calling [`Self::read`].
    /// When `self.vaddr == self.range.start`, this behaves like array indexing.
    ///
    /// # Verified Properties
    ///
    /// ## Preconditions
    /// - `0 < self.vaddr + n < usize::MAX`.
    /// - `self.range@.start <= self.vaddr + n < self.range@.end`.
    /// - `mem.addr_transl((self.vaddr + n) as usize) is Some`.
    /// - Target byte at offset is initialized.
    ///
    /// ## Postconditions
    /// - Returns `self.read_offset_spec(*mem, n)`.
    pub fn read_offset(&self, Tracked(mem): Tracked<&MemView>, n: usize) -> u8
        requires
            0 < self.vaddr + n < usize::MAX,
            self.range@.start <= self.vaddr + n < self.range@.end,
            mem.addr_transl((self.vaddr + n) as usize) is Some,
            mem.memory[mem.addr_transl((self.vaddr + n) as usize).unwrap().0].contents[mem.addr_transl((self.vaddr + n) as usize).unwrap().1 as int] is Init,
        returns
            self.read_offset_spec(*mem, n),
    {
        let mut tmp = self.clone();
        tmp.add(n);
        tmp.read(Tracked(mem))
    }

    /// Spec-level effect of [`Self::write_offset`]: `mem` with the byte at
    /// `self.vaddr + n` set to `x`.
    pub open spec fn write_offset_spec(self, mem: MemView, n: usize, x: u8) -> MemView {
        mem.write((self.vaddr + n) as usize, x)
    }

    /// Writes to `self.vaddr + n` without mutating `self`.
    ///
    /// # Verified Properties
    ///
    /// ## Preconditions
    /// - `self.inv()`.
    /// - `self.range@.start <= self.vaddr + n < self.range@.end`.
    /// - `old(mem).addr_transl((self.vaddr + n) as usize) is Some`.
    ///
    /// NOTE(review): no `ensures` clause — callers learn nothing about the
    /// resulting `mem`; consider ensuring
    /// `*final(mem) == self.write_offset_spec(*old(mem), n, x)` (see the
    /// `admit()` it forces in [`Self::copy_offset`]).
    pub fn write_offset(&self, Tracked(mem): Tracked<&mut MemView>, n: usize, x: u8)
        requires
            self.inv(),
            0 < self.vaddr + n < usize::MAX,
            self.range@.start <= self.vaddr + n < self.range@.end,
            old(mem).addr_transl((self.vaddr + n) as usize) is Some,
    {
        let mut tmp = self.clone();
        tmp.add(n);
        tmp.write(Tracked(mem), x)
    }

    /// Spec-level effect of [`Self::copy_offset`]: the destination view after
    /// writing `src`'s byte at offset `n` to `dst` at the same offset.
    pub open spec fn copy_offset_spec(src: Self, dst: Self, mem_src: MemView, mem_dst: MemView, n: usize) -> MemView {
        let x = src.read_offset_spec(mem_src, n);
        dst.write_offset_spec(mem_dst, n, x)
    }

    /// Copies one byte from `src + n` to `dst + n`.
    ///
    /// Source and destination are reasoned about via distinct memory views.
    ///
    /// # Verified Properties
    ///
    /// ## Preconditions
    /// - `src.inv()` and `dst.inv()`.
    /// - Source offset and destination offset are both in-range.
    /// - Source offset is mapped and initialized in `mem_src`.
    /// - Destination offset is mapped in `old(mem_dst)`.
    ///
    /// ## Postconditions
    /// - `*mem_dst == Self::copy_offset_spec(*src, *dst, *mem_src, *old(mem_dst), n)`.
    /// - `mem_dst.mappings == old(mem_dst).mappings`.
    /// - `mem_dst.memory.dom() == old(mem_dst).memory.dom()`.
    pub fn copy_offset(src: &Self, dst: &Self, Tracked(mem_src): Tracked<&MemView>, Tracked(mem_dst): Tracked<&mut MemView>, n: usize)
        requires
            src.inv(),
            dst.inv(),
            0 < src.vaddr + n < usize::MAX,
            0 < dst.vaddr + n < usize::MAX,
            src.range@.start <= src.vaddr + n < src.range@.end,
            mem_src.addr_transl((src.vaddr + n) as usize) is Some,
            mem_src.memory.contains_key(mem_src.addr_transl((src.vaddr + n) as usize).unwrap().0),
            mem_src.memory[mem_src.addr_transl((src.vaddr + n) as usize).unwrap().0].contents[mem_src.addr_transl((src.vaddr + n) as usize).unwrap().1 as int] is Init,

            dst.range@.start <= dst.vaddr + n < dst.range@.end,
            old(mem_dst).addr_transl((dst.vaddr + n) as usize) is Some,
        ensures
            *final(mem_dst) == Self::copy_offset_spec(*src, *dst, *mem_src, *old(mem_dst), n),
            final(mem_dst).mappings == old(mem_dst).mappings,
            final(mem_dst).memory.dom() == old(mem_dst).memory.dom(),

    {
        let x = src.read_offset(Tracked(mem_src), n);
        // NOTE(review): `write_offset` currently has no `ensures`, so this
        // function's postconditions cannot be derived from it — `admit()`
        // simply assumes them. TODO: give `write_offset` a postcondition and
        // remove this admit.
        proof { admit() };
        dst.write_offset(Tracked(mem_dst), n, x)
    }

    /// Spec-level effect of copying `n` bytes from `src` to `dst`.
    ///
    /// Bytes are applied at offsets `n - 1`, `n - 2`, ..., `0` (highest offset
    /// first), matching the recursion order of [`Self::copy_nonoverlapping`].
    pub open spec fn memcpy_spec(src: Self, dst: Self, mem_src: MemView, mem_dst: MemView, n: usize) -> MemView
        decreases n,
    {
        if n == 0 {
            mem_dst
        } else {
            let mem_dst_1 = Self::copy_offset_spec(src, dst, mem_src, mem_dst, (n - 1) as usize);

            Self::memcpy_spec(src, dst, mem_src, mem_dst_1, (n - 1) as usize)
        }
    }

    /// Copies `n` bytes from `src` to `dst` in the given memory view.
    ///
    /// The source and destination must *not* overlap.
    /// `copy_nonoverlapping` is semantically equivalent to C’s `memcpy`,
    /// but with the source and destination arguments swapped.
    ///
    /// `mem` points to `src`'s owned memory regions.
    ///
    /// # Verified Properties
    ///
    /// ## Preconditions
    /// - `src.inv()` and `dst.inv()`.
    /// - Source and destination ranges are non-overlapping.
    /// - Source slice `[src.vaddr, src.vaddr + n)` is in-range and initialized in `mem_src`.
    /// - Destination slice `[dst.vaddr, dst.vaddr + n)` is in-range and mapped in `old(mem_dst)`.
    ///
    /// ## Postconditions
    /// - `*mem_dst == Self::memcpy_spec(*src, *dst, *mem_src, *old(mem_dst), n)`.
    /// - `mem_dst.mappings == old(mem_dst).mappings`.
    /// - `mem_dst.memory.dom() == old(mem_dst).memory.dom()`.
    /// - Destination slice remains mapped in `mem_dst`.
    pub fn copy_nonoverlapping(
        src: &Self,
        dst: &Self,
        Tracked(mem_src): Tracked<&MemView>,
        Tracked(mem_dst): Tracked<&mut MemView>,
        n: usize,
    )
        requires
            src.inv(),
            dst.inv(),
            src.vaddr > 0,
            dst.vaddr > 0,
            src.range@.end <= dst.range@.start || dst.range@.end <= src.range@.start,
            src.range@.start <= src.vaddr,
            src.vaddr + n <= src.range@.end,
            forall|i: usize|
                #![trigger mem_src.addr_transl(i)]
                src.vaddr <= i < src.vaddr + n ==> {
                    &&& mem_src.addr_transl(i) is Some
                    &&& mem_src.memory.contains_key(mem_src.addr_transl(i).unwrap().0)
                    &&& mem_src.memory[mem_src.addr_transl(i).unwrap().0].contents[mem_src.addr_transl(i).unwrap().1 as int] is Init
                },
            dst.range@.start <= dst.vaddr,
            dst.vaddr + n <= dst.range@.end,
            forall|i: usize|
                dst.vaddr <= i < dst.vaddr + n ==> {
                    &&& old(mem_dst).addr_transl(i) is Some
                },
        ensures
            *final(mem_dst) == Self::memcpy_spec(*src, *dst, *mem_src, *old(mem_dst), n),
            final(mem_dst).mappings == old(mem_dst).mappings,
            final(mem_dst).memory.dom() == old(mem_dst).memory.dom(),
            forall|i: usize|
                #![trigger final(mem_dst).addr_transl(i)]
                dst.vaddr <= i < dst.vaddr + n ==> {
                    &&& final(mem_dst).addr_transl(i) is Some
            },
        decreases n,
    {
        if n == 0 {
            return ;
        } else {
            let ghost mem0 = *mem_dst;

            // Copy the highest remaining byte first, then recurse on the prefix.
            Self::copy_offset(src, dst, Tracked(mem_src), Tracked(mem_dst), n - 1);

            proof {
                // Translation depends only on `mappings`, which `copy_offset`
                // leaves unchanged, so the remaining destination bytes are
                // still mapped for the recursive call's precondition.
                assert(forall|i: usize|
                    dst.vaddr <= i < dst.vaddr + n - 1 ==>
                    mem_dst.addr_transl(i) == #[trigger] mem0.addr_transl(i)
                );
                assert forall|i: usize|
                    dst.vaddr <= i < dst.vaddr + n - 1 implies mem_dst.addr_transl(i) is Some by {
                    assert(mem_dst.addr_transl(i) == mem0.addr_transl(i));
                }
            }

            Self::copy_nonoverlapping(src, dst, Tracked(mem_src), Tracked(mem_dst), n - 1);
        }
    }

    /// Builds a [`VirtPtr`] covering `[vaddr, vaddr + len)`.
    ///
    /// # Verified Properties
    ///
    /// ## Preconditions
    /// - `len <= usize::MAX - vaddr` (the range end does not overflow).
    ///
    /// ## Postconditions
    /// - `r.inv()` and `r.vaddr == vaddr`.
    /// - `r.range@.start == vaddr`.
    /// - `r.range@.end == (vaddr + len) as usize`.
    /// - If `vaddr != 0 && len > 0`, then `r.is_valid()`.
    pub fn from_vaddr(vaddr: usize, len: usize) -> (r: Self)
        requires
            len <= usize::MAX - vaddr,
        ensures
            r.inv(),
            r.vaddr == vaddr,
            r.range@.start == vaddr,
            r.range@.end == (vaddr + len) as usize,
            vaddr != 0 && len > 0 ==> r.is_valid(),
    {
        Self { vaddr, range: Ghost(Range { start: vaddr, end: (vaddr + len) as usize }) }
    }

    /// Executable helper to split the [`VirtPtr`] into two contiguous ranges.
    ///
    /// This updates ghost ranges to match a split operation over the same region.
    ///
    /// # Verified Properties
    ///
    /// ## Preconditions
    /// - `self.is_valid()`.
    /// - `0 <= n <= self.range@.end - self.range@.start`.
    /// - `self.vaddr == self.range@.start`.
    ///
    /// ## Postconditions
    /// - Left pointer covers `[self.range@.start, self.range@.start + n)`.
    /// - Right pointer covers `[self.range@.start + n, self.range@.end)`.
    #[verus_spec(r =>
        requires
            self.is_valid(),
            0 <= n <= self.range@.end - self.range@.start,
            self.vaddr == self.range@.start,
        ensures
            r.0.range@.start == self.range@.start,
            r.0.range@.end == self.range@.start + n,
            r.0.vaddr == self.range@.start,
            r.1.range@.start == self.range@.start + n,
            r.1.range@.end == self.range@.end,
            r.1.vaddr == self.range@.start + n,
    )]
    pub fn split(self, n: usize) -> (Self, Self) {
        let left = VirtPtr {
            vaddr: self.vaddr,
            range: Ghost(Range { start: self.vaddr, end: (self.vaddr + n) as usize }),
        };

        let right = VirtPtr {
            vaddr: self.vaddr + n,
            range: Ghost(Range { start: (self.vaddr + n) as usize, end: self.range@.end }),
        };

        (left, right)
    }
}

/// A [`GlobalMemView`] is a more abstract view of memory that elides most of the details. The API specifications
/// of higher-level objects like [`VmSpaceOwner`](crate::specs::mm::vm_space::VmSpaceOwner) use this view.
pub tracked struct GlobalMemView {
    /// Page-table mapping set (presumably the installed page-table entries;
    /// not used by [`Self::addr_transl`] — confirm against the PT model).
    pub pt_mappings: Set<Mapping>,
    /// TLB mapping set; [`Self::addr_transl`] translates through these.
    pub tlb_mappings: Set<Mapping>,
    /// Physical addresses not covered by any TLB mapping
    /// (see [`Self::unmapped_correct`]).
    pub unmapped_pas: Set<Paddr>,
    /// Physical frame contents keyed by frame base address.
    pub memory: Map<Paddr, FrameContents>,
}

impl Inv for GlobalMemView {

    /// Well-formedness invariant for [`GlobalMemView`].
    ///
    /// # Invariant
    /// - Every TLB mapping is well-formed, every address in its physical range
    ///   is in `memory.dom()`, and the frame at its base address has the
    ///   mapped page size and satisfies [`FrameContents::inv`].
    /// - Distinct TLB mappings are pairwise VA-disjoint.
    /// - The TLB/PT mapping sets and the memory domain are finite.
    /// - Every physical address below [`MAX_PADDR`] is accounted for, physical
    ///   ranges are uniquely mapped, and `unmapped_pas` is exact.
    open spec fn inv(self) -> bool {
        &&& forall |m: Mapping| #![auto] self.tlb_mappings.contains(m) ==> {
            &&& m.inv()
            &&& forall|pa: Paddr| m.pa_range.start <= pa < m.pa_range.end ==> {
                &&& self.memory.dom().contains(pa)
            }
            &&& self.memory.contains_key(m.pa_range.start)
            &&& self.memory[m.pa_range.start].size == m.page_size
            &&& self.memory[m.pa_range.start].inv()
        }
        // Pairwise VA-disjointness of TLB mappings
        // (cf. `MemView::mappings_are_disjoint`).
        &&& forall |m: Mapping|
            forall |n: Mapping| #![auto]
            self.tlb_mappings.contains(m) ==>
            self.tlb_mappings.contains(n) ==>
            m != n ==>
            #[trigger]
            m.va_range.end <= n.va_range.start || n.va_range.end <= m.va_range.start
        &&& self.tlb_mappings.finite()
        &&& self.pt_mappings.finite()
        &&& self.memory.dom().finite()
        &&& self.all_pas_accounted_for()
        &&& self.pas_uniquely_mapped()
        &&& self.unmapped_correct()
    }
}

808impl GlobalMemView {
809
810    pub open spec fn addr_transl(self, va: usize) -> Option<(usize, usize)> {
811        let mappings = self.tlb_mappings.filter(|m: Mapping| m.va_range.start <= va < m.va_range.end);
812        if 0 < mappings.len() {
813            let m = mappings.choose();  // In a well-formed TLB there will only be one, but if malformed this is non-deterministic!
814            let off = va - m.va_range.start;
815            Some((m.pa_range.start, off as usize))
816        } else {
817            None
818        }
819    }
820
821    pub open spec fn is_mapped(self, pa: usize) -> bool {
822        exists|m: Mapping| self.tlb_mappings.contains(m) && m.pa_range.start <= pa < m.pa_range.end
823    }
824
825    pub open spec fn all_pas_accounted_for(self) -> bool {
826        forall|pa: Paddr|
827            0 <= pa < MAX_PADDR ==>
828            #[trigger] self.is_mapped(pa) || #[trigger] self.unmapped_pas.contains(pa)
829    }
830
831    pub open spec fn pas_uniquely_mapped(self) -> bool {
832        forall|m1: Mapping, m2: Mapping|
833             #![trigger self.tlb_mappings.contains(m1), self.tlb_mappings.contains(m2)]
834             self.tlb_mappings.contains(m1) && self.tlb_mappings.contains(m2) && m1 != m2 ==>
835            m1.pa_range.end <= m2.pa_range.start || m2.pa_range.end <= m1.pa_range.start
836    }
837
838    pub open spec fn unmapped_correct(self) -> bool {
839        forall|pa: Paddr|
840            #![auto]
841            self.is_mapped(pa) <==> !self.unmapped_pas.contains(pa)
842    }
843
844    pub open spec fn take_view_spec(self, vaddr: usize, len: usize) -> (Self, MemView) {
845        let range_end = vaddr + len;
846
847        let leave_mappings: Set<Mapping> = self.tlb_mappings.filter(
848            |m: Mapping| m.va_range.end <= vaddr || m.va_range.start > range_end
849        );
850
851        let take_mappings = self.tlb_mappings.filter(
852            |m: Mapping| m.va_range.start < range_end && m.va_range.end > vaddr
853        );
854
855        let leave_pas = Set::new(
856            |pa: usize|
857                exists|m: Mapping| leave_mappings.contains(m) && m.pa_range.start <= pa < m.pa_range.end
858        );
859        let take_pas = Set::new(
860            |pa: usize|
861                exists|m: Mapping| take_mappings.contains(m) && m.pa_range.start <= pa < m.pa_range.end
862        );
863
864        (
865            GlobalMemView {
866                tlb_mappings: leave_mappings,
867                memory: self.memory.restrict(leave_pas),
868                ..self
869            },
870            MemView { mappings: take_mappings, memory: self.memory.restrict(take_pas) },
871        )
872    }
873
874    pub axiom fn take_view(tracked &mut self, vaddr: usize, len: usize) -> (tracked view: MemView)
875        ensures
876            *final(self) == old(self).take_view_spec(vaddr, len).0,
877            view == old(self).take_view_spec(vaddr, len).1;
878
879    pub open spec fn return_view_spec(self, view: MemView) -> Self {
880        GlobalMemView {
881            tlb_mappings: self.tlb_mappings.union(view.mappings),
882            memory: self.memory.union_prefer_right(view.memory),
883            ..self
884        }
885    }
886
887    pub axiom fn return_view(&mut self, view: MemView)
888        ensures
889            *final(self) == old(self).return_view_spec(view);
890
891    pub open spec fn tlb_flush_vaddr_spec(self, vaddr: Vaddr) -> Self {
892        let tlb_mappings = self.tlb_mappings.filter(
893            |m: Mapping| m.va_range.end <= vaddr || vaddr < m.va_range.start
894        );
895        GlobalMemView {
896            tlb_mappings,
897            ..self
898        }
899    }
900
901    pub axiom fn tlb_flush_vaddr(&mut self, vaddr: Vaddr)
902        requires
903            old(self).inv()
904        ensures
905            *final(self) == old(self).tlb_flush_vaddr_spec(vaddr),
906            final(self).inv();
907
908    pub open spec fn tlb_soft_fault_spec(self, vaddr: Vaddr) -> Self {
909        let mapping = self.pt_mappings.filter(|m: Mapping| m.va_range.start <= vaddr < m.va_range.end).choose();
910        GlobalMemView {
911            tlb_mappings: self.tlb_mappings.insert(mapping),
912            ..self
913        }
914    }
915
916    pub axiom fn tlb_soft_fault(tracked &mut self, vaddr: Vaddr)
917        requires
918            old(self).inv(),
919            old(self).addr_transl(vaddr) is None,
920        ensures
921            *final(self) == old(self).tlb_soft_fault_spec(vaddr),
922            final(self).inv();
923
924    pub open spec fn pt_map_spec(self, m: Mapping) -> Self {
925        let pt_mappings = self.pt_mappings.insert(m);
926        let unmapped_pas = self.unmapped_pas.difference(
927            Set::new(|pa: usize| m.pa_range.start <= pa < m.pa_range.end)
928        );
929        GlobalMemView {
930            pt_mappings,
931            unmapped_pas,
932            ..self
933        }
934    }
935
936    pub axiom fn pt_map(&mut self, m: Mapping)
937        requires
938            forall|pa: Paddr|
939                m.pa_range.start <= pa < m.pa_range.end ==>
940                old(self).unmapped_pas.contains(pa),
941            old(self).inv()
942        ensures
943            *final(self) == old(self).pt_map_spec(m);
944
945    pub open spec fn pt_unmap_spec(self, m: Mapping) -> Self {
946        let pt_mappings = self.pt_mappings.remove(m);
947        let unmapped_pas = self.unmapped_pas.union(
948            Set::new(|pa: usize| m.pa_range.start <= pa < m.pa_range.end)
949        );
950        GlobalMemView {
951            pt_mappings,
952            unmapped_pas,
953            ..self
954        }
955    }
956
957    pub axiom fn pt_unmap(&mut self, m: Mapping)
958        requires
959            old(self).pt_mappings.contains(m),
960            old(self).inv()
961        ensures
962            *final(self) == old(self).pt_unmap_spec(m),
963            final(self).inv();
964
965    pub proof fn lemma_va_mapping_unique(self, va: usize)
966        requires
967            self.inv(),
968        ensures
969            self.tlb_mappings.filter(|m: Mapping| m.va_range.start <= va < m.va_range.end).is_singleton(),
970    {
971        admit()
972    }
973}
974
975} // verus!