// ostd/mm/io.rs

1// SPDX-License-Identifier: MPL-2.0
2//! Abstractions for reading and writing virtual memory (VM) objects.
3//!
4//! # Safety
5//!
6//! The core virtual memory (VM) access APIs provided by this module are [`VmReader`] and
7//! [`VmWriter`], which allow for writing to or reading from a region of memory _safely_.
8//! `VmReader` and `VmWriter` objects can be constructed from memory regions of either typed memory
9//! (e.g., `&[u8]`) or untyped memory (e.g, [`UFrame`]). Behind the scene, `VmReader` and `VmWriter`
10//! must be constructed via their [`from_user_space`] and [`from_kernel_space`] methods, whose
11//! safety depends on whether the given memory regions are _valid_ or not.
12//!
13//! [`UFrame`]: crate::mm::UFrame
14//! [`from_user_space`]: `VmReader::from_user_space`
15//! [`from_kernel_space`]: `VmReader::from_kernel_space`
16//!
17//! Here is a list of conditions for memory regions to be considered valid:
18//!
19//! - The memory region as a whole must be either typed or untyped memory, not both typed and
20//!   untyped.
21//!
22//! - If the memory region is typed, we require that:
23//!   - the [validity requirements] from the official Rust documentation must be met, and
24//!   - the type of the memory region (which must exist since the memory is typed) must be
25//!     plain-old-data, so that the writer can fill it with arbitrary data safely.
26//!
27//! [validity requirements]: core::ptr#safety
28//!
29//! - If the memory region is untyped, we require that:
30//!   - the underlying pages must remain alive while the validity requirements are in effect, and
31//!   - the kernel must access the memory region using only the APIs provided in this module, but
32//!     external accesses from hardware devices or user programs do not count.
33//!
34//! We have the last requirement for untyped memory to be valid because the safety interaction with
35//! other ways to access the memory region (e.g., atomic/volatile memory loads/stores) is not
//! currently specified. This may be relaxed in the future, if appropriate and necessary.
37//!
38//! Note that data races on untyped memory are explicitly allowed (since pages can be mapped to
39//! user space, making it impossible to avoid data races). However, they may produce erroneous
40//! results, such as unexpected bytes being copied, but do not cause soundness problems.
41use crate::specs::arch::PAGE_SIZE;
42use core::marker::PhantomData;
43use core::ops::Range;
44use vstd::pervasive::arbitrary;
45use vstd::prelude::*;
46use vstd::simple_pptr::*;
47use vstd::std_specs::convert::TryFromSpecImpl;
48use vstd_extra::array_ptr::ArrayPtr;
49use vstd_extra::array_ptr::PointsToArray;
50use vstd_extra::ownership::Inv;
51
52use crate::error::*;
53use crate::mm::pod::{Pod, PodOnce};
54use crate::specs::arch::kspace::{KERNEL_BASE_VADDR, KERNEL_END_VADDR};
55use crate::specs::arch::MAX_USERSPACE_VADDR_SPEC;
56use crate::specs::mm::page_table::Mapping;
57use crate::specs::mm::virt_mem_newer::{MemView, VirtPtr};
58
59verus! {
60
/// Copies data from `reader` to `writer` in a fallible manner.
///
/// On success, returns the number of bytes copied. On failure, returns the
/// error together with the number of bytes copied before the failure.
///
/// Ghost state: both the reader's and the writer's `VmIoOwner` tokens are
/// updated in lockstep so that their invariants keep holding afterwards.
///
/// NOTE(review): the body is currently a stub that always returns `Ok(0)`;
/// the `ensures` clauses hold trivially because nothing is mutated. Confirm
/// this is intentional before relying on it.
#[verus_spec(r =>
        with
            Tracked(owner_r): Tracked<&mut VmIoOwner<'_>>,
            Tracked(owner_w): Tracked<&mut VmIoOwner<'_>>,
        requires
            old(reader).inv(),
            old(writer).inv(),
            old(owner_r).inv(),
            old(owner_w).inv(),
            old(owner_r).inv_with_reader(*old(reader)),
            old(owner_w).inv_with_writer(*old(writer)),
            old(owner_r).is_fallible && old(owner_w).is_fallible, // both fallible
        ensures
            reader.inv(),
            writer.inv(),
            owner_r.inv(),
            owner_w.inv(),
            owner_r.inv_with_reader(*reader),
            owner_w.inv_with_writer(*writer),
            owner_r.params_eq(*old(owner_r)),
            owner_w.params_eq(*old(owner_w)),
    )]
pub fn rw_fallible(reader: &mut VmReader<'_>, writer: &mut VmWriter<'_>) -> core::result::Result<
    usize,
    (Error, usize),
> {
    Ok(0)  // placeholder.

}
90
/// Copies `len` bytes from `src` to `dst`.
///
/// # Safety
///
/// - `src` must be [valid] for reads of `len` bytes.
/// - `dst` must be [valid] for writes of `len` bytes.
///
/// [valid]: crate::mm::io#safety
#[inline]
#[verifier::external_body]
#[verus_spec(
    requires
        // Both the source and the destination ranges must lie entirely
        // within the kernel address space.
        KERNEL_BASE_VADDR() <= dst && dst + len <= KERNEL_END_VADDR() &&
        KERNEL_BASE_VADDR() <= src && src + len <= KERNEL_END_VADDR()
)]
unsafe fn memcpy(dst: usize, src: usize, len: usize) {
    // This method is implemented by calling `volatile_copy_memory`. Note that even with the
    // "volatile" keyword, data races are still considered undefined behavior (UB) in both the Rust
    // documentation and the C/C++ standards. In general, UB makes the behavior of the entire
    // program unpredictable, usually due to compiler optimizations that assume the absence of UB.
    // However, in this particular case, considering that the Linux kernel uses the "volatile"
    // keyword to implement `READ_ONCE`/`WRITE_ONCE`, the compiler is extremely unlikely to
    // break our code unless it also breaks the Linux kernel.
    //
    // For more details and future possibilities, see
    // <https://github.com/asterinas/asterinas/pull/1001#discussion_r1667317406>.
    // SAFETY: The safety is guaranteed by the safety preconditions and the explanation above.
    core::intrinsics::volatile_copy_memory(dst as *mut u8, src as *const u8, len);
}
120
/// `VmReader` is a reader for reading data from a contiguous range of memory.
///
/// The memory range read by `VmReader` can be in either kernel space or user space.
/// When the operating range is in kernel space, the memory within that range
/// is guaranteed to be valid, and the corresponding memory reads are infallible.
/// When the operating range is in user space, it is ensured that the page table of
/// the process creating the `VmReader` is active for the duration of `'a`,
/// and the corresponding memory reads are considered fallible.
///
/// When performing reads together with a `VmWriter`, if one of them represents
/// typed memory, it can ensure that the reading range in this reader and the
/// writing range in the writer are not overlapped.
///
/// NOTE: The overlap mentioned above is at both the virtual address level
/// and the physical address level. There is no guarantee for the operation
/// results of `VmReader` and `VmWriter` in overlapping untyped addresses, and
/// it is the user's responsibility to handle this situation.
pub struct VmReader<'a  /*, Fallibility = Fallible*/ > {
    /// Ghost identifier linking this reader to its `VmIoOwner` token.
    pub id: Ghost<nat>,
    /// The current read position within the range.
    pub cursor: VirtPtr,
    /// The (exclusive) end of the readable range.
    pub end: VirtPtr,
    /// Ties the reader to the lifetime `'a` of the underlying memory.
    pub phantom: PhantomData<&'a [u8]  /*, Fallibility)*/ >,
}
144
/// The memory view used for VM I/O operations.
pub tracked enum VmIoMemView<'a> {
    /// An owned (exclusive) memory view for writing.
    WriteView(MemView),
    /// A shared (borrowed) memory view for reading.
    ReadView(&'a MemView),
}
152
/// The tracked ownership token paired with a `VmReader` or `VmWriter`.
///
/// NOTE(review): the reader and writer flavors could likely be unified into a
/// single struct with a type parameter.
pub tracked struct VmIoOwner<'a> {
    /// The unique identifier of this owner.
    pub id: Ghost<nat>,
    /// The virtual address range owned by this owner.
    pub range: Ghost<Range<usize>>,
    /// Whether this reader is fallible.
    pub is_fallible: bool,
    /// Ties the owner to the lifetime `'a` of the underlying memory.
    pub phantom: PhantomData<&'a [u8]  /*, Fallibility)*/ >,
    /// Whether this owner is for kernel space.
    pub is_kernel: bool,
    /// The mem view associated with this owner. `None` means no memory is
    /// accessible (a placeholder to be populated later).
    pub mem_view: Option<VmIoMemView<'a>>,
}
169
impl Inv for VmIoOwner<'_> {
    /// The owner is well-formed iff its range is ordered and, when a memory
    /// view is attached, every address in the range translates under it.
    open spec fn inv(self) -> bool {
        // We do allow ZSTs so that empty ranges are valid.
        &&& self.range@.start <= self.range@.end
        &&& match self.mem_view {
            // Case 1: Write (exclusive). The mappings must form a finite,
            //         pairwise-disjoint set covering the whole range.
            Some(VmIoMemView::WriteView(mv)) => {
                &&& mv.mappings.finite()
                &&& mv.mappings_are_disjoint()
                &&& forall|va: usize|
                    self.range@.start <= va < self.range@.end ==> {
                        &&& #[trigger] mv.addr_transl(va) is Some
                    }
            },
            // Case 2: Read (shared). Same conditions as the write case, but
            //         over a borrowed view.
            Some(VmIoMemView::ReadView(mv)) => {
                &&& mv.mappings.finite()
                &&& mv.mappings_are_disjoint()
                &&& forall|va: usize|
                    self.range@.start <= va < self.range@.end ==> {
                        &&& #[trigger] mv.addr_transl(va) is Some
                    }
            },
            // Case 3: Empty/Invalid; this means no memory is accessible,
            //         and we are just creating a placeholder.
            None => true,
        }
    }
}
199
impl VmIoOwner<'_> {
    /// Checks whether this owner overlaps with another owner.
    #[verifier::inline]
    pub open spec fn overlaps(self, other: VmIoOwner<'_>) -> bool {
        !self.disjoint(other)
    }

    /// Checks whether this owner's address range overlaps with `range`.
    ///
    /// NOTE(review): both comparisons use `<=`, so two half-open ranges that
    /// merely touch at an endpoint (e.g. `0..5` and `5..10`) count as
    /// overlapping. This makes `disjoint` stricter than strict interval
    /// disjointness — confirm this conservatism is intended.
    #[verifier::inline]
    pub open spec fn overlaps_with_range(self, range: Range<usize>) -> bool {
        &&& self.range@.start <= range.end
        &&& range.start <= self.range@.end
    }

    /// Checks whether this owner is disjoint with another owner.
    ///
    /// Disjointness requires both non-overlapping virtual address ranges and,
    /// when both owners carry a memory view, disjoint mapping sets.
    #[verifier::inline]
    pub open spec fn disjoint(self, other: VmIoOwner<'_>) -> bool {
        &&& !self.overlaps_with_range(other.range@)
        &&& match (self.mem_view, other.mem_view) {
            // All four view combinations reduce to mapping-set disjointness.
            (Some(lhs), Some(rhs)) => match (lhs, rhs) {
                (VmIoMemView::WriteView(lmv), VmIoMemView::WriteView(rmv)) => {
                    lmv.mappings.disjoint(rmv.mappings)
                },
                (VmIoMemView::WriteView(lmv), VmIoMemView::ReadView(rmv)) => {
                    lmv.mappings.disjoint(rmv.mappings)
                },
                (VmIoMemView::ReadView(lmv), VmIoMemView::WriteView(rmv)) => {
                    lmv.mappings.disjoint(rmv.mappings)
                },
                (VmIoMemView::ReadView(lmv), VmIoMemView::ReadView(rmv)) => {
                    lmv.mappings.disjoint(rmv.mappings)
                },
            },
            // If either owner has no view, only the range check applies.
            _ => true,
        }
    }

    /// Two owners have equal "parameters" when they cover the same range and
    /// share the same fallibility.
    #[verifier::inline]
    pub open spec fn params_eq(self, other: VmIoOwner<'_>) -> bool {
        &&& self.range@ == other.range@
        &&& self.is_fallible == other.is_fallible
    }

    /// Changes the fallibility of this owner.
    pub proof fn change_fallible(tracked &mut self, tracked fallible: bool)
        requires
            old(self).inv(),
            old(self).is_fallible != fallible,
        ensures
            self.inv(),
            self.is_fallible == fallible,
    {
        self.is_fallible = fallible;
    }

    /// Advances the cursor of the reader/writer by the given number of bytes.
    ///
    /// Note this will return the advanced `VmIoMemView` as the previous permission
    /// is no longer needed and must be discarded then.
    pub proof fn advance(tracked &mut self, nbytes: usize) -> (tracked res: VmIoMemView<'_>)
        requires
            old(self).inv(),
            old(self).mem_view is Some,
            nbytes <= old(self).range@.end - old(self).range@.start,
        ensures
            self.inv(),
            self.range@.start == old(self).range@.start + nbytes,
            self.range@.end == old(self).range@.end,
            self.is_fallible == old(self).is_fallible,
            self.id == old(self).id,
            self.is_kernel == old(self).is_kernel,
    {
        let start = self.range@.start;
        let old_end = self.range@.end;
        // Shrink the owned range from the front by `nbytes`.
        self.range = Ghost((start + nbytes) as usize..self.range@.end);
        // Take this option and leave `None` in its place temporarily.
        let tracked inner = self.mem_view.tracked_take();
        let tracked ret_perm = match inner {
            VmIoMemView::WriteView(mv) => {
                // Split the exclusive view: `lhs` covers the consumed prefix,
                // `rhs` covers the remainder that this owner keeps.
                let tracked (lhs, rhs) = mv.split(start, nbytes);

                assert(forall|va: usize|
                    start <= va < start + nbytes ==> mv.addr_transl(va) is Some);

                // Update the mem view to the remaining part.
                self.mem_view = Some(VmIoMemView::WriteView(rhs));

                // Re-establish the invariant: every address remaining in the
                // range still translates under the split-off `rhs` view.
                assert(self.inv()) by {
                    assert forall|va: usize| start + nbytes <= va < old_end implies {
                        #[trigger] rhs.addr_transl(va) is Some
                    } by {
                        assert(mv.addr_transl(va) is Some) by {
                            assert(old(self).inv());
                        }

                        // Mappings of the old/new views that contain `va`.
                        let old_mappings = mv.mappings.filter(
                            |m: Mapping| m.va_range.start <= va < m.va_range.end,
                        );
                        let new_mappings = rhs.mappings.filter(
                            |m: Mapping| m.va_range.start <= va < m.va_range.end,
                        );
                        assert(old_mappings.len() != 0);
                        assert(rhs.mappings =~= mv.mappings.filter(
                            |m: Mapping| m.va_range.end > start + nbytes,
                        ));
                        assert(new_mappings =~= mv.mappings.filter(
                            |m: Mapping|
                                m.va_range.start <= va < m.va_range.end && m.va_range.end > start
                                    + nbytes,
                        ));

                        // Show a witness mapping containing `va` survives the split.
                        assert(new_mappings.len() != 0) by {
                            broadcast use vstd::set::group_set_axioms;

                            let m = old_mappings.choose();
                            // m.start <= va < m.end
                            assert(start + nbytes <= va);
                            assert(m.va_range.end > va) by {
                                if (m.va_range.end <= va) {
                                    assert(!old_mappings.contains(m));
                                }
                            }
                            assert(m.va_range.end > start + nbytes);
                            assert(old_mappings.contains(m));
                            assert(old_mappings.subset_of(mv.mappings));
                            assert(rhs.mappings.contains(m));
                            assert(new_mappings.contains(m));
                            assert(new_mappings.len() >= 1);
                        }
                    }
                }

                VmIoMemView::WriteView(lhs)
            },
            VmIoMemView::ReadView(mv) => {
                // Borrow the consumed prefix from the shared view.
                let tracked sub_view = mv.borrow_at(start, nbytes);
                // Since reads are shared, we don't need to
                // modify the original view; here we just put
                // it back.
                self.mem_view = Some(VmIoMemView::ReadView(mv));
                VmIoMemView::ReadView(sub_view)
            },
        };

        ret_perm
    }

    /// Unwraps the read view.
    ///
    /// The precondition guarantees the view is a `ReadView`, so the fallback
    /// arm is unreachable (`proof_from_false`).
    pub proof fn tracked_read_view_unwrap(tracked &self) -> (tracked r: &MemView)
        requires
            self.inv(),
            self.mem_view matches Some(VmIoMemView::ReadView(_)),
        ensures
            VmIoMemView::ReadView(r) == self.mem_view.unwrap(),
    {
        match self.mem_view {
            Some(VmIoMemView::ReadView(r)) => r,
            _ => { proof_from_false() },
        }
    }
}
360
impl Inv for VmWriter<'_> {
    /// A writer is well-formed iff both pointers are valid, the cursor does
    /// not pass the end, and both pointers describe the same range.
    open spec fn inv(self) -> bool {
        &&& self.cursor.inv()
        &&& self.end.inv()
        &&& self.cursor.vaddr
            <= self.end.vaddr
        // Ensure that they point to the same range as in VmIoOwner.
        &&& self.cursor.range@ == self.end.range@
    }
}
371
impl Inv for VmReader<'_> {
    /// A reader is well-formed iff both pointers are valid, the cursor does
    /// not pass the end, and both pointers describe the same range.
    /// (Mirrors `Inv for VmWriter`.)
    open spec fn inv(self) -> bool {
        &&& self.cursor.inv()
        &&& self.end.inv()
        &&& self.cursor.vaddr
            <= self.end.vaddr
        // Ensure that they point to the same range as in VmIoOwner.
        &&& self.cursor.range@ == self.end.range@
    }
}
382
impl VmIoOwner<'_> {
    /// The owner agrees with `reader`: same id, the owner's range matches
    /// the reader's `[cursor, end)`, and any attached read view covers the
    /// entire range.
    pub open spec fn inv_with_reader(
        self,
        reader: VmReader<'_  /* Fallibility */ >,
    ) -> bool {
        &&& self.inv()
        &&& self.range@.start == reader.cursor.vaddr
        &&& self.range@.end == reader.end.vaddr
        &&& self.id == reader.id
        &&& self.mem_view matches Some(VmIoMemView::ReadView(mv)) ==> {
            // Ensure that the mem view covers the entire range.
            forall|va: usize|
                self.range@.start <= va < self.range@.end ==> {
                    &&& #[trigger] mv.addr_transl(va) is Some
                }
        }
    }

    /// The owner agrees with `writer`; symmetric to `inv_with_reader` but
    /// constrains an attached write view instead.
    pub open spec fn inv_with_writer(
        self,
        writer: VmWriter<'_  /* Fallibility */ >,
    ) -> bool {
        &&& self.inv()
        &&& self.range@.start == writer.cursor.vaddr
        &&& self.range@.end == writer.end.vaddr
        &&& self.id == writer.id
        &&& self.mem_view matches Some(VmIoMemView::WriteView(mv)) ==> {
            // Ensure that the mem view covers the entire range.
            forall|va: usize|
                self.range@.start <= va < self.range@.end ==> {
                    &&& #[trigger] mv.addr_transl(va) is Some
                }
        }
    }
}
418
/// `VmWriter` is a writer for writing data to a contiguous range of memory.
///
/// The memory range written by `VmWriter` can be in either kernel space or user space.
/// When the operating range is in kernel space, the memory within that range
/// is guaranteed to be valid, and the corresponding memory writes are infallible.
/// When the operating range is in user space, it is ensured that the page table of
/// the process creating the `VmWriter` is active for the duration of `'a`,
/// and the corresponding memory writes are considered fallible.
///
/// When performing writes together with a `VmReader`, if one of them represents
/// typed memory, it can ensure that the writing range in this writer and the
/// reading range in the reader are not overlapped.
///
/// NOTE: The overlap mentioned above is at both the virtual address level
/// and the physical address level. There is no guarantee for the operation
/// results of `VmReader` and `VmWriter` in overlapping untyped addresses, and
/// it is the user's responsibility to handle this situation.
pub struct VmWriter<'a  /*, Fallibility = Fallible*/ > {
    /// Ghost identifier linking this writer to its `VmIoOwner` token.
    pub id: Ghost<nat>,
    /// The current write position within the range.
    pub cursor: VirtPtr,
    /// The (exclusive) end of the writable range.
    pub end: VirtPtr,
    /// Ties the writer to the lifetime `'a` of the underlying memory.
    pub phantom: PhantomData<&'a [u8]  /*, Fallibility)*/ >,
}
442
#[verus_verify]
impl<'a> VmWriter<'a  /* Infallible */ > {
    /// Constructs a `VmWriter` from a pointer and a length, which represents
    /// a memory range in kernel space.
    ///
    /// # Safety
    ///
    /// `ptr` must be [valid] for writes of `len` bytes during the entire lifetime `'a`.
    ///
    /// [valid]: crate::mm::io#safety
    #[verus_spec(r =>
        with
            Ghost(id): Ghost<nat>,
            Tracked(fallible): Tracked<bool>,
                -> owner: Tracked<VmIoOwner<'a>>,
        requires
            !fallible,
            ptr.inv(),
            ptr.range@.start == ptr.vaddr,
            len == ptr.range@.end - ptr.range@.start,
            len == 0 || KERNEL_BASE_VADDR() <= ptr.vaddr,
            len == 0 || ptr.vaddr + len <= KERNEL_END_VADDR(),
        ensures
            owner@.inv(),
            owner@.inv_with_writer(r),
            owner@.is_fallible == fallible,
            owner@.id == id,
            owner@.is_kernel,
            owner@.mem_view is None,
            r.cursor == ptr,
            r.end.vaddr == ptr.vaddr + len,
            r.cursor.range@ == ptr.range@,
            r.end.range@ == ptr.range@,
    )]
    pub unsafe fn from_kernel_space(ptr: VirtPtr, len: usize) -> Self {
        let ghost range = ptr.range@;
        // Build the ownership token; the mem view is left empty (`None`)
        // and is expected to be populated later.
        let tracked owner = VmIoOwner {
            id: Ghost(id),
            range: Ghost(range),
            is_fallible: fallible,
            phantom: PhantomData,
            is_kernel: true,
            mem_view: None,
        };

        let ghost range = ptr.vaddr..(ptr.vaddr + len) as usize;
        let end = VirtPtr { vaddr: ptr.vaddr + len, range: Ghost(range) };

        proof_with!(|= Tracked(owner));
        Self { id: Ghost(id), cursor: ptr, end, phantom: PhantomData }
    }

    /// Constructs an infallible `VmWriter` over the byte representation of a
    /// plain-old-data value.
    ///
    /// Returns `Err(Error::IoError)` when the value's address/length do not
    /// form a well-formed kernel-space range, or when the address is null.
    ///
    /// NOTE(review): `val` is consumed by value, so the bytes the returned
    /// writer points to appear to live only for the duration of this call —
    /// confirm the intended lifetime against `as_bytes_mut`'s contract.
    #[verus_spec(r =>
        with
            Ghost(id): Ghost<nat>,
            -> owner: Tracked<Result<VmIoOwner<'a>>>,
        ensures
            r is Ok <==> owner@ is Ok,
            r matches Ok(r) ==> {
                &&& owner@ matches Ok(owner) ==> {
                    &&& r.inv()
                    &&& r.avail_spec() == core::mem::size_of::<T>()
                    &&& owner.inv()
                    &&& owner.inv_with_writer(r)
                }
            }
    )]
    pub fn from_pod<T: Pod>(mut val: T) -> Result<VmWriter<'a  /* Infallible */ >> {
        proof_decl! {
            let tracked mut perm;
        }

        let (pnt, len) = val.as_bytes_mut();

        // Reject ranges that fall outside kernel space or would overflow,
        // and null addresses.
        if len != 0 && (pnt < KERNEL_BASE_VADDR() || len >= KERNEL_END_VADDR() || pnt
            > KERNEL_END_VADDR() - len) || pnt == 0 {
            proof_with!(|= Tracked(Err(Error::IoError)));
            Err(Error::IoError)
        } else {
            let ghost range = pnt..(pnt + len) as usize;
            let vptr = VirtPtr { vaddr: pnt as usize, range: Ghost(range) };
            let r = unsafe {
                #[verus_spec(with Ghost(id), Tracked(false) => Tracked(perm))]
                VmWriter::from_kernel_space(vptr, len)
            };

            proof_with!(|= Tracked(Ok(perm)));
            Ok(r)
        }
    }
}
534
535// `Clone` can be implemented for `VmReader`
536// because it either points to untyped memory or represents immutable references.
537// Note that we cannot implement `Clone` for `VmWriter`
538// because it can represent mutable references, which must remain exclusive.
539impl Clone for VmReader<'_  /* Fallibility */ > {
540    fn clone(&self) -> Self {
541        Self { id: self.id, cursor: self.cursor, end: self.end, phantom: PhantomData }
542    }
543}
544
#[verus_verify]
impl<'a> VmReader<'a  /* Infallible */ > {
    /// Constructs a `VmReader` from a pointer whose ghost range describes
    /// a memory region in USER space.
    ///
    /// NOTE(review): unverified stub (`external_body` + `unimplemented!`).
    #[verifier::external_body]
    #[verus_spec(r =>
        with
            Ghost(id): Ghost<nat>,
            -> owner: Tracked<VmIoOwner<'a>>,
        requires
            ptr.inv(),
        ensures
            r.inv(),
            owner@.id == id,
            owner@.inv(),
            owner@.inv_with_reader(r),
            owner@.range == ptr.range@,
            owner@.mem_view is None,
            !owner@.is_kernel,
            !owner@.is_fallible,
    )]
    pub unsafe fn from_user_space(ptr: VirtPtr) -> Self {
        // SAFETY: The caller must ensure the safety requirements.
        unimplemented!()
    }

    /// Constructs a `VmReader` from a pointer and a length, which represents
    /// a memory range in kernel space.
    ///
    /// # Safety
    ///
    /// `ptr` must be [valid] for reads of `len` bytes during the entire lifetime `'a`.
    ///
    /// [valid]: crate::mm::io#safety
    #[verus_spec(r =>
        with
            Ghost(id): Ghost<nat>,
            -> owner: Tracked<VmIoOwner<'a>>,
        requires
            ptr.inv(),
            ptr.range@.start == ptr.vaddr,
            len == ptr.range@.end - ptr.range@.start,
            len == 0 || KERNEL_BASE_VADDR() <= ptr.vaddr,
            len == 0 || ptr.vaddr + len <= KERNEL_END_VADDR(),
        ensures
            owner@.inv(),
            owner@.inv_with_reader(r),
            r.cursor.vaddr == ptr.vaddr,
            r.end.vaddr == ptr.vaddr + len,
            r.cursor == ptr,
            r.end.range@ == ptr.range@,
            owner@.is_kernel,
            owner@.id == id,
    )]
    pub unsafe fn from_kernel_space(ptr: VirtPtr, len: usize) -> Self {
        let tracked owner = VmIoOwner {
            id: Ghost(id),
            range: Ghost(ptr.vaddr..(ptr.vaddr + len) as usize),
            is_fallible: false,
            phantom: PhantomData,
            is_kernel: true,
            // This is left empty as will be populated later.
            mem_view: None,
        };

        let ghost range = ptr.vaddr..(ptr.vaddr + len) as usize;
        let end = VirtPtr { vaddr: ptr.vaddr + len, range: Ghost(range) };

        proof_with!(|= Tracked(owner));
        Self { id: Ghost(id), cursor: ptr, end, phantom: PhantomData }
    }

    /// Constructs an infallible `VmReader` over the byte representation of a
    /// plain-old-data value.
    ///
    /// Returns `Err(Error::IoError)` when the value's address/length do not
    /// form a well-formed kernel-space range, or when the address is null.
    ///
    /// NOTE(review): `val` is borrowed as `&mut T` with a lifetime unrelated
    /// to `'a`; confirm the returned reader cannot outlive the borrow.
    /// Also note the asymmetry with `VmWriter::from_pod`, which takes its
    /// value by move.
    #[verus_spec(r =>
        with
            Ghost(id): Ghost<nat>,
            -> owner: Tracked<Result<VmIoOwner<'a>>>,
        ensures
            r is Ok <==> owner@ is Ok,
            r matches Ok(r) ==> {
                &&& owner@ matches Ok(owner) ==> {
                    &&& r.inv()
                    &&& r.remain_spec() == core::mem::size_of::<T>()
                    &&& owner.inv()
                    &&& owner.inv_with_reader(r)
                }
            }
    )]
    pub fn from_pod<T: Pod>(val: &mut T) -> Result<VmReader<'a  /* Infallible */ >> {
        proof_decl! {
            let tracked mut perm;
        }

        let (pnt, len) = val.as_bytes_mut();

        // Reject ranges that fall outside kernel space or would overflow,
        // and null addresses.
        if len != 0 && (pnt < KERNEL_BASE_VADDR() || len >= KERNEL_END_VADDR() || pnt
            > KERNEL_END_VADDR() - len) || pnt == 0 {
            proof_with!(|= Tracked(Err(Error::IoError)));
            Err(Error::IoError)
        } else {
            let ghost range = pnt..(pnt + len) as usize;
            let vptr = VirtPtr { vaddr: pnt, range: Ghost(range) };
            let r = unsafe {
                #[verus_spec(with Ghost(id) => Tracked(perm))]
                VmReader::from_kernel_space(vptr, len)
            };

            proof {
                assert(r.inv());
                assert(perm.inv());
            }

            proof_with!(|= Tracked(Ok(perm)));
            Ok(r)
        }
    }
}
661
#[verus_verify]
impl<'a> TryFrom<&'a [u8]> for VmReader<'a  /* Infallible */ > {
    type Error = Error;

    /// Builds an infallible kernel-space `VmReader` over the slice's bytes.
    ///
    /// Fails with `Error::IoError` when the slice does not lie within a
    /// well-formed kernel-space range, or when its address is null.
    #[verus_spec()]
    fn try_from(slice: &'a [u8]) -> Result<Self> {
        proof_decl! {
            let tracked mut perm;
        }

        let addr = slice.as_ptr() as usize;

        if slice.len() != 0 && (addr < KERNEL_BASE_VADDR() || slice.len() >= KERNEL_END_VADDR()
            || addr > KERNEL_END_VADDR() - slice.len()) || addr == 0 {
            return Err(Error::IoError);
        }
        // SAFETY:
        // - The memory range points to typed memory.
        // - The validity requirements for read accesses are met because the pointer is converted
        //   from an immutable reference that outlives the lifetime `'a`.
        // - The type, i.e., the `u8` slice, is plain-old-data.

        let ghost range = addr..(addr + slice.len()) as usize;
        let vptr = VirtPtr { vaddr: addr, range: Ghost(range) };

        Ok(
            unsafe {
                // The ghost id is irrelevant here, hence `arbitrary()`.
                #[verus_spec(with Ghost(arbitrary()) => Tracked(perm))]
                Self::from_kernel_space(vptr, slice.len())
            },
        )
    }
}
695
/// Specification counterpart of `TryFrom<&[u8]> for VmReader`, mirroring the
/// executable range checks of `try_from` above.
impl<'a> TryFromSpecImpl<&'a [u8]> for VmReader<'a> {
    open spec fn obeys_try_from_spec() -> bool {
        true
    }

    open spec fn try_from_spec(slice: &'a [u8]) -> Result<Self> {
        let addr = slice.as_ptr() as usize;
        let len = slice.len();

        // Same rejection condition as the executable `try_from`.
        if len != 0 && (addr < KERNEL_BASE_VADDR() || len >= KERNEL_END_VADDR() || addr
            > KERNEL_END_VADDR() - slice.len()) || addr == 0 {
            Err(Error::IoError)
        } else {
            Ok(
                Self {
                    // Id is not important here.
                    id: Ghost(arbitrary()),
                    cursor: VirtPtr { vaddr: addr, range: Ghost(addr..(addr + len) as usize) },
                    end: VirtPtr {
                        vaddr: (addr + len) as usize,
                        range: Ghost(addr..(addr + len) as usize),
                    },
                    phantom: PhantomData,
                },
            )
        }
    }
}
724
// NOTE(review): this trait impl may be replaced; see discussion below about
// whether `VmWriter` should be built from a fixed-size `ArrayPtr` instead.
#[verus_verify]
impl<'a> TryFrom<&'a [u8]> for VmWriter<'a  /* Infallible */ > {
    type Error = Error;

    /// Builds an infallible kernel-space `VmWriter` over the slice's bytes.
    ///
    /// Fails with `Error::IoError` when the slice does not lie within a
    /// well-formed kernel-space range, or when its address is null.
    ///
    /// NOTE(review): the input is a *shared* `&[u8]`, yet the SAFETY comment
    /// below claims the pointer comes from a mutable reference. Writing
    /// through memory derived from a shared reference would be unsound —
    /// confirm whether this should take `&'a mut [u8]` instead.
    // fn try_from(slice: ArrayPtr<u8, N>, Tracked(owner))??
    #[verus_spec()]
    fn try_from(slice: &'a [u8]  /* length... */ ) -> Result<Self> {
        proof_decl! {
            let tracked mut perm;
        }

        let addr = slice.as_ptr() as usize;

        if slice.len() != 0 && (addr < KERNEL_BASE_VADDR() || slice.len() >= KERNEL_END_VADDR()
            || addr > KERNEL_END_VADDR() - slice.len()) || addr == 0 {
            return Err(Error::IoError);
        }
        // SAFETY:
        // - The memory range points to typed memory.
        // - The validity requirements for write accesses are met because the pointer is converted
        //   from a mutable reference that outlives the lifetime `'a`.
        // - The type, i.e., the `u8` slice, is plain-old-data.

        let ghost range = addr..(addr + slice.len()) as usize;
        let vptr = VirtPtr { vaddr: addr, range: Ghost(range) };

        proof {
            assert(vptr.inv());
        }

        Ok(
            unsafe {
                // The ghost id is irrelevant here; writers constructed this
                // way are infallible (`Tracked(false)`).
                #[verus_spec(with Ghost(arbitrary()), Tracked(false) => Tracked(perm))]
                Self::from_kernel_space(vptr, slice.len())
            },
        )
    }
}
765
/// Specification counterpart of `TryFrom<&[u8]> for VmWriter`, mirroring the
/// executable range checks of `try_from` above.
impl<'a> TryFromSpecImpl<&'a [u8]> for VmWriter<'a> {
    open spec fn obeys_try_from_spec() -> bool {
        true
    }

    open spec fn try_from_spec(slice: &'a [u8]) -> Result<Self> {
        let addr = slice.as_ptr() as usize;
        let len = slice.len();

        // Same rejection condition as the executable `try_from`.
        if len != 0 && (addr < KERNEL_BASE_VADDR() || len >= KERNEL_END_VADDR() || addr
            > KERNEL_END_VADDR() - slice.len()) || addr == 0 {
            Err(Error::IoError)
        } else {
            Ok(
                Self {
                    // Id is not important here.
                    id: Ghost(arbitrary()),
                    cursor: VirtPtr { vaddr: addr, range: Ghost(addr..(addr + len) as usize) },
                    end: VirtPtr {
                        vaddr: (addr + len) as usize,
                        range: Ghost(addr..(addr + len) as usize),
                    },
                    phantom: PhantomData,
                },
            )
        }

    }
}
794
/// Module-local alias: all fallible VM I/O operations report [`Error`].
type Result<T> = core::result::Result<T, Error>;
796
/// A trait that enables reading/writing data from/to a VM object,
/// e.g., [`USegment`], [`Vec<UFrame>`] and [`UFrame`].
///
/// # Concurrency
///
/// The methods may be executed by multiple concurrent reader and writer
/// threads. If predictability or atomicity of concurrent reads or writes is
/// required, users should add an extra synchronization mechanism to obtain
/// those properties.
///
/// [`USegment`]: crate::mm::USegment
/// [`UFrame`]: crate::mm::UFrame
///
/// Note: In this trait we follow the `vstd` trait convention that allows precondition and
/// postcondition overriding by introducing `obeys_`, `_requires`, and `_ensures` spec functions.
///
/// `P` is the type of the permission/ownership token used to track the state of the VM object.
pub trait VmIo<P: Sized>: Send + Sync + Sized {
    // spec fn reader_inv_with_owner(self, owner: VmIoOwner<'_>) -> bool;
    /// If this returns true then the `requires` clause of `read` will be active.
    spec fn obeys_vmio_read_requires() -> bool;

    /// If this returns true then the `ensures` clause of `read` will be active.
    spec fn obeys_vmio_read_ensures() -> bool;

    /// If this returns true then the `requires` clause of `write` will be active.
    spec fn obeys_vmio_write_requires() -> bool;

    /// If this returns true then the `ensures` clause of `write` will be active.
    spec fn obeys_vmio_write_ensures() -> bool;

    /// Checks whether the preconditions for `read` are met.
    spec fn vmio_read_requires(self, owner: P, offset: usize) -> bool;

    /// Checks whether the postconditions for `read` are met.
    spec fn vmio_read_ensures(self, owner: P, offset: usize, new_owner: P, r: Result<()>) -> bool;

    /// Checks whether the preconditions for `write` are met.
    spec fn vmio_write_requires(self, owner: P, offset: usize) -> bool;

    /// Checks whether the postconditions for `write` are met.
    spec fn vmio_write_ensures(self, owner: P, offset: usize, new_owner: P, r: Result<()>) -> bool;

    /// Reads data from the VM object at `offset` into the array denoted by
    /// `slice`, whose ownership is tracked by `slice_owner`.
    ///
    /// NOTE(review): `slice` is `ArrayPtr<T, N>` while `slice_owner` is
    /// `PointsToArray<u8, N>`; `write_slice` below pairs `ArrayPtr<T, N>` with
    /// `PointsToArray<T, N>` — confirm the element type here is intended.
    fn read_slice<T: Pod, const N: usize>(
        &self,
        offset: usize,
        slice: ArrayPtr<T, N>,
        Tracked(slice_owner): Tracked<&mut PointsToArray<u8, N>>,
        Tracked(owner): Tracked<&mut P>,
    ) -> (r: Result<()>);

    /// Writes the `N` bytes denoted by `bytes` (tracked by `bytes_owner`) to
    /// the VM object at `offset`.
    fn write_bytes<const N: usize>(
        &self,
        offset: usize,
        bytes: ArrayPtr<u8, N>,
        Tracked(bytes_owner): Tracked<&mut PointsToArray<u8, N>>,
        Tracked(owner): Tracked<&mut P>,
    ) -> (r: Result<()>)
    // requires
    //     Self::obeys_vmio_write_requires() ==> Self::vmio_write_requires(
    //         *self,
    //         *old(owner),
    //         offset,
    //     ),
    // ensures
    //     Self::obeys_vmio_write_ensures() ==> Self::vmio_write_ensures(
    //         *self,
    //         *old(owner),
    //         offset,
    //         *owner,
    //         r,
    //     ),
    ;

    /// Writes a value of `Pod` type to the VM object at `offset`.
    fn write_val<T: Pod>(&self, offset: usize, val: T, Tracked(owner): Tracked<&mut P>) -> (r:
        Result<()>)
    // requires
    //     Self::obeys_vmio_write_requires() ==> Self::vmio_write_requires(
    //         *self,
    //         *old(owner),
    //         offset,
    //     ),
    // ensures
    //     Self::obeys_vmio_write_ensures() ==> Self::vmio_write_ensures(
    //         *self,
    //         *old(owner),
    //         offset,
    //         *owner,
    //         r,
    //     ),
    ;

    /// Writes the `N` elements of `T` denoted by `slice` (tracked by
    /// `slice_owner`) to the VM object at `offset`.
    fn write_slice<T: Pod, const N: usize>(
        &self,
        offset: usize,
        slice: ArrayPtr<T, N>,
        Tracked(slice_owner): Tracked<&mut PointsToArray<T, N>>,
        Tracked(owner): Tracked<&mut P>,
    ) -> (r: Result<()>)
        requires
            Self::obeys_vmio_write_requires() ==> Self::vmio_write_requires(
                *self,
                *old(owner),
                offset,
            ),
        ensures
            Self::obeys_vmio_write_ensures() ==> Self::vmio_write_ensures(
                *self,
                *old(owner),
                offset,
                *owner,
                r,
            ),
    ;
}
912
/// A trait that enables reading/writing data from/to a VM object using one non-tearing memory
/// load/store.
///
/// See also [`VmIo`], which enables reading/writing data from/to a VM object without the guarantee
/// of using one non-tearing memory load/store.
pub trait VmIoOnce: Sized {
    /// If this returns true then the `requires` clause of `read_once` will be active.
    spec fn obeys_vmio_once_read_requires() -> bool;

    /// If this returns true then the `requires` clause of `write_once` will be active.
    spec fn obeys_vmio_once_write_requires() -> bool;

    /// If this returns true then the `ensures` clause of `read_once` will be active.
    spec fn obeys_vmio_once_read_ensures() -> bool;

    /// If this returns true then the `ensures` clause of `write_once` will be active.
    spec fn obeys_vmio_once_write_ensures() -> bool;

    /// Reads a value of the `PodOnce` type at the specified offset using one non-tearing memory
    /// load.
    ///
    /// Except that the offset is specified explicitly, the semantics of this method is the same as
    /// [`VmReader::read_once`].
    fn read_once<T: PodOnce>(&self, offset: usize) -> Result<T>;

    /// Writes a value of the `PodOnce` type at the specified offset using one non-tearing memory
    /// store.
    ///
    /// Except that the offset is specified explicitly, the semantics of this method is the same as
    /// [`VmWriter::write_once`].
    fn write_once<T: PodOnce>(&self, offset: usize, new_val: &T) -> Result<()>;
}
941
#[verus_verify]
impl VmReader<'_> {
    /// Spec-level count of the bytes between the cursor and the end pointer.
    pub open spec fn remain_spec(&self) -> usize {
        (self.end.vaddr - self.cursor.vaddr) as usize
    }

    /// Returns the number of remaining bytes that can be read.
    #[inline]
    #[verus_spec(r =>
        requires
            self.inv(),
        ensures
            r == self.remain_spec(),
    )]
    #[verifier::when_used_as_spec(remain_spec)]
    pub fn remain(&self) -> usize {
        self.end.vaddr - self.cursor.vaddr
    }

    /// Advances the cursor by `len` bytes. Requires that there are at least `len` bytes remaining.
    #[inline]
    #[verus_spec(
        requires
            old(self).inv(),
            len <= old(self).remain_spec(),
        ensures
            self.inv(),
            self.cursor.vaddr == old(self).cursor.vaddr + len,
            self.remain_spec() == old(self).remain_spec() - len,
            self.id == old(self).id,
            self.end == old(self).end,
    )]
    pub fn advance(&mut self, len: usize) {
        self.cursor.vaddr = self.cursor.vaddr + len;
    }

    /// Reads all data into the writer until one of the two conditions is met:
    /// 1. The reader has no remaining data.
    /// 2. The writer has no available space.
    ///
    /// Returns the number of bytes read.
    #[verus_spec(r =>
        with
            Tracked(owner_r): Tracked<&mut VmIoOwner<'_>>,
            Tracked(owner_w): Tracked<&mut VmIoOwner<'_>>,
        requires
            old(self).inv(),
            old(writer).inv(),
            old(owner_r).inv(),
            old(owner_w).inv(),
            old(owner_r).inv_with_reader(*old(self)),
            old(owner_w).inv_with_writer(*old(writer)),
            old(owner_r).mem_view is Some,
            old(owner_w).mem_view matches Some(VmIoMemView::WriteView(_)),
            // Non-overlapping requirements.
            old(writer).cursor.range@.start >= old(self).cursor.range@.end
                || old(self).cursor.range@.start >= old(writer).cursor.range@.end,
        ensures
            self.inv(),
            writer.inv(),
            owner_r.inv(),
            owner_w.inv(),
            owner_r.inv_with_reader(*self),
            owner_w.inv_with_writer(*writer),
            r == vstd::math::min(old(self).remain_spec() as int, old(writer).avail_spec() as int),
            self.remain_spec() == old(self).remain_spec() - r as usize,
            self.cursor.vaddr == old(self).cursor.vaddr + r as usize,
            writer.avail_spec() == old(writer).avail_spec() - r as usize,
            writer.cursor.vaddr == old(writer).cursor.vaddr + r as usize,
    )]
    pub fn read(&mut self, writer: &mut VmWriter) -> usize {
        // Copy at most min(remain, avail) bytes.
        let mut copy_len = if self.remain() < writer.avail() {
            self.remain()
        } else {
            writer.avail()
        };

        if copy_len == 0 {
            return 0;
        }
        // The `memcpy` here becomes a safe API because the tracked permissions
        // (`owner_r`/`owner_w`, bound by the `with` clause of the spec above)
        // for both reader and writer guarantee that the memory accesses are valid.
        //
        // This is equivalent to: memcpy(writer.cursor.vaddr, self.cursor.vaddr, copy_len);
        // TODO: Still wait for VirtPtr to finish their implementation.

        // Temporarily take the writer's memory view so its permissions can be
        // threaded through the (future) copy, then restore it below.
        let tracked mut mv = match owner_w.mem_view.tracked_take() {
            VmIoMemView::WriteView(mv) => mv,
            _ => { proof_from_false() },
        };

        self.advance(copy_len);
        writer.advance(copy_len);

        proof {
            owner_w.mem_view = Some(VmIoMemView::WriteView(mv));
            owner_w.advance(copy_len);
            owner_r.advance(copy_len);
        }

        copy_len
    }

    /// Reads a value of `Pod` type.
    ///
    /// If the length of the `Pod` type exceeds `self.remain()`,
    /// this method will return `Err`.
    #[verifier::external_body]
    #[verus_spec(r =>
        with
            Ghost(id): Ghost<nat>,
            Tracked(owner): Tracked<&mut VmIoOwner<'_>>,
        requires
            old(self).inv(),
            old(owner).inv(),
            old(owner).inv_with_reader(*old(self)),
            old(owner).mem_view is Some,
        ensures
            self.inv(),
            owner.inv(),
            owner.inv_with_reader(*self),
            owner.params_eq(*old(owner)),
            match r {
                Ok(_) => {
                    &&& self.remain_spec() == old(self).remain_spec() - core::mem::size_of::<T>()
                    &&& self.cursor.vaddr == old(self).cursor.vaddr + core::mem::size_of::<T>()
                },
                Err(_) => {
                    *old(self) == *self
                },
            }
    )]
    pub fn read_val<T: Pod>(&mut self) -> Result<T> {
        if self.remain() < core::mem::size_of::<T>() {
            return Err(Error::InvalidArgs);
        }
        let mut v = T::new_uninit();
        proof_with!(Ghost(id) => Tracked(owner_w));
        // NOTE(review): `from_pod(v)` appears to take `v` by value while `v`
        // is returned afterwards — presumably the macro expansion borrows `v`
        // mutably so the bytes written via `self.read` land in it; confirm.
        let mut writer = VmWriter::from_pod(v)?;

        let tracked mut owner_w = owner_w.tracked_unwrap();

        proof_with!(Tracked(owner), Tracked(&mut owner_w));
        self.read(&mut writer);

        Ok(v)
    }

    /// Performs one non-tearing volatile load of `T` at the current cursor.
    #[verifier::external_body]
    #[verus_spec(
        requires
            self.inv(),
            core::mem::size_of::<T>() <= self.remain_spec(),
    )]
    fn read_once_inner<T: PodOnce>(&self) -> T {
        let pnt = self.cursor.vaddr as *const T;
        // SAFETY: the precondition guarantees at least `size_of::<T>()`
        // readable bytes at the cursor; the caller is responsible for the
        // alignment of the cursor with respect to `T`.
        // NOTE(review): no runtime alignment check is visible on this path — a
        // misaligned `read_volatile` is UB rather than a panic; confirm the
        // caller's "# Panics" contract is actually enforced somewhere.
        unsafe { pnt.read_volatile() }
    }

    /// Reads a value of the `PodOnce` type using one non-tearing memory load.
    ///
    /// If the length of the `PodOnce` type exceeds `self.remain()`, this method will return `Err`.
    ///
    /// This method will not compile if the `Pod` type is too large for the current architecture
    /// and the operation must be torn into multiple memory loads.
    ///
    /// # Panics
    ///
    /// This method will panic if the current position of the reader does not meet the alignment
    /// requirements of type `T`.
    #[verus_spec(r =>
        with
            Tracked(owner): Tracked<&mut VmIoOwner<'_>>,
        requires
            old(self).inv(),
            old(owner).mem_view is Some,
            old(owner).inv(),
            old(owner).inv_with_reader(*old(self)),
        ensures
            self.inv(),
            owner.inv(),
            owner.inv_with_reader(*self),
            match r {
                Ok(_) => {
                    &&& self.remain_spec() == old(self).remain_spec() - core::mem::size_of::<T>()
                    &&& self.cursor.vaddr == old(self).cursor.vaddr + core::mem::size_of::<T>()
                },
                Err(_) => {
                    *old(self) == *self
                },
            }
    )]
    pub fn read_val_once<T: PodOnce>(&mut self) -> Result<T> {
        if core::mem::size_of::<T>() > self.remain() {
            return Err(Error::InvalidArgs);
        }
        // SAFETY: We have checked that the number of bytes remaining is at least the size of `T`
        // and that the cursor is properly aligned with respect to the type `T`. All other safety
        // requirements are the same as for `Self::read`.

        let v = self.read_once_inner::<T>();
        self.advance(core::mem::size_of::<T>());

        proof {
            // Keep the ownership token's cursor in sync with the reader's.
            owner.advance(core::mem::size_of::<T>());
        }

        Ok(v)
    }
}
1152
#[verus_verify]
impl<'a> VmWriter<'a> {
    /// Spec-level count of the bytes between the cursor and the end pointer.
    pub open spec fn avail_spec(&self) -> usize {
        (self.end.vaddr - self.cursor.vaddr) as usize
    }

    /// Creates a `VmWriter` over a user-space virtual address range.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `ptr` denotes a valid user-space memory
    /// region per the module-level validity rules.
    ///
    /// NOTE(review): currently `unimplemented!()`; the spec below describes
    /// the intended contract of the eventual implementation.
    #[verifier::external_body]
    #[verus_spec(r =>
        with
            Ghost(id): Ghost<nat>,
            -> owner: Tracked<VmIoOwner<'a>>,
        requires
            ptr.inv(),
        ensures
            r.inv(),
            owner@.id == id,
            owner@.inv(),
            owner@.inv_with_writer(r),
            owner@.range == ptr.range@,
            owner@.mem_view is None,
            !owner@.is_kernel,
            !owner@.is_fallible,
    )]
    pub unsafe fn from_user_space(ptr: VirtPtr) -> Self {
        // SAFETY: The caller must ensure the safety requirements.
        unimplemented!()
    }

    /// Returns the number of available bytes that can be written.
    ///
    /// This has the same implementation as [`VmReader::remain`] but semantically
    /// they are different.
    #[inline]
    #[verus_spec(r =>
        requires
            self.inv(),
        ensures
            r == self.avail_spec(),
    )]
    #[verifier::when_used_as_spec(avail_spec)]
    pub fn avail(&self) -> usize {
        self.end.vaddr - self.cursor.vaddr
    }

    /// Advances the cursor by `len` bytes. Requires that there are at least `len` bytes available.
    #[inline]
    // NOTE(review): the `self.cursor.range@ == old(self).cursor.range@` ensures
    // clause appears twice in the spec below; one occurrence is redundant.
    #[verus_spec(
        requires
            old(self).inv(),
            len <= old(self).avail_spec(),
        ensures
            self.inv(),
            self.avail_spec() == old(self).avail_spec() - len,
            self.cursor.vaddr == old(self).cursor.vaddr + len,
            self.cursor.range@ == old(self).cursor.range@,
            self.id == old(self).id,
            self.end == old(self).end,
            self.cursor.range@ == old(self).cursor.range@,
    )]
    pub fn advance(&mut self, len: usize) {
        self.cursor.vaddr = self.cursor.vaddr + len;
    }

    /// Writes all data from the reader until one of the two conditions is met:
    /// 1. The reader has no remaining data.
    /// 2. The writer has no available space.
    ///
    /// Returns the number of bytes written.
    #[inline]
    #[verus_spec(r =>
        with
            Tracked(owner_w): Tracked<&mut VmIoOwner<'_>>,
            Tracked(owner_r): Tracked<&mut VmIoOwner<'_>>,
        requires
            old(self).inv(),
            old(reader).inv(),
            old(owner_w).inv(),
            old(owner_r).inv(),
            old(owner_w).inv_with_writer(*old(self)),
            old(owner_r).inv_with_reader(*old(reader)),
            old(owner_r).mem_view is Some,
            old(owner_w).mem_view matches Some(VmIoMemView::WriteView(_)),
            // Non-overlapping requirements.
            old(self).cursor.range@.start >= old(reader).cursor.range@.end
                || old(reader).cursor.range@.start >= old(self).cursor.range@.end,
        ensures
            self.inv(),
            reader.inv(),
            owner_w.inv(),
            owner_r.inv(),
            owner_w.inv_with_writer(*self),
            owner_r.inv_with_reader(*reader),
            r == vstd::math::min(old(self).avail_spec() as int, old(reader).remain_spec() as int),
            self.avail_spec() == old(self).avail_spec() - r as usize,
            self.cursor.vaddr == old(self).cursor.vaddr + r as usize,
            reader.remain_spec() == old(reader).remain_spec() - r as usize,
            reader.cursor.vaddr == old(reader).cursor.vaddr + r as usize,
    )]
    pub fn write(&mut self, reader: &mut VmReader) -> usize {
        // Delegate to `VmReader::read`, which performs the actual transfer.
        proof_with!(Tracked(owner_r), Tracked(owner_w));
        reader.read(self)
    }

    /// Writes a value of `Pod` type.
    ///
    /// If the length of the `Pod` type exceeds `self.avail()`,
    /// this method will return `Err`.
    #[verifier::external_body]
    #[verus_spec(r =>
        with
            Ghost(id): Ghost<nat>,
            Tracked(owner_w): Tracked<&mut VmIoOwner<'_>>,
            Tracked(memview_r): Tracked<&MemView>,
        requires
            old(self).inv(),
            old(owner_w).inv(),
            old(owner_w).inv_with_writer(*old(self)),
            old(owner_w).mem_view is Some,
        ensures
            self.inv(),
            owner_w.inv(),
            owner_w.inv_with_writer(*self),
            owner_w.params_eq(*old(owner_w)),
            match r {
                Ok(_) => {
                    &&& self.avail_spec() == old(self).avail_spec() - core::mem::size_of::<T>()
                    &&& self.cursor.vaddr == old(self).cursor.vaddr + core::mem::size_of::<T>()
                },
                Err(_) => {
                    *old(self) == *self
                },
            }
    )]
    // NOTE(review): `val` is only read here; it is `&mut T` presumably because
    // `VmReader::from_pod` needs a mutable borrow — confirm.
    pub fn write_val<T: Pod>(&mut self, val: &mut T) -> Result<()> {
        if self.avail() < core::mem::size_of::<T>() {
            return Err(Error::InvalidArgs);
        }
        proof_with!(Ghost(id) => Tracked(owner_r));
        let mut reader = VmReader::from_pod(val)?;

        let tracked mut owner_r = owner_r.tracked_unwrap();

        proof_with!(Tracked(owner_w), Tracked(&mut owner_r));
        self.write(&mut reader);

        Ok(())
    }

    /// Performs one non-tearing volatile store of `T` at the current cursor.
    #[verifier::external_body]
    #[verus_spec(
        requires
            self.inv(),
            core::mem::size_of::<T>() <= self.avail_spec(),
    )]
    fn write_once_inner<T: PodOnce>(&self, new_val: &mut T) {
        let cursor = self.cursor.vaddr as *mut T;
        // SAFETY: the precondition guarantees at least `size_of::<T>()`
        // writable bytes at the cursor; the caller is responsible for the
        // alignment of the cursor with respect to `T`.
        // NOTE(review): no runtime alignment check is visible on this path —
        // a misaligned `write_volatile` is UB rather than a panic; confirm.
        unsafe { cursor.write_volatile(*new_val) };
    }

    /// Writes a value of the `PodOnce` type using one non-tearing memory store.
    ///
    /// If the length of the `PodOnce` type exceeds `self.avail()`, this method
    /// will return `Err`.
    #[verus_spec(r =>
        with
            Tracked(owner_w): Tracked<&mut VmIoOwner<'_>>,
        requires
            old(self).inv(),
            old(owner_w).mem_view is Some,
            old(owner_w).inv(),
            old(owner_w).inv_with_writer(*old(self)),
        ensures
            self.inv(),
            owner_w.inv(),
            owner_w.inv_with_writer(*self),
            match r {
                Ok(_) => {
                    &&& self.avail_spec() == old(self).avail_spec() - core::mem::size_of::<T>()
                    &&& self.cursor.vaddr == old(self).cursor.vaddr + core::mem::size_of::<T>()
                },
                Err(_) => {
                    *old(self) == *self
                },
            }
    )]
    pub fn write_once<T: PodOnce>(&mut self, new_val: &mut T) -> Result<()> {
        if core::mem::size_of::<T>() > self.avail() {
            return Err(Error::InvalidArgs);
        }
        // SAFETY: We have checked that the number of bytes available is at least the size of `T`
        // and that the cursor is properly aligned with respect to the type `T`. All other safety
        // requirements are the same as for `Self::write`.

        self.write_once_inner::<T>(new_val);
        self.advance(core::mem::size_of::<T>());

        proof {
            // Keep the ownership token's cursor in sync with the writer's.
            owner_w.advance(core::mem::size_of::<T>());
        }

        Ok(())
    }
}
1352
1353} // verus!