// ostd/mm/io.rs
1// SPDX-License-Identifier: MPL-2.0
2//! Abstractions for reading and writing virtual memory (VM) objects.
3//!
4//! # Safety
5//!
6//! The core virtual memory (VM) access APIs provided by this module are [`VmReader`] and
7//! [`VmWriter`], which allow for writing to or reading from a region of memory _safely_.
8//! `VmReader` and `VmWriter` objects can be constructed from memory regions of either typed memory
//! (e.g., `&[u8]`) or untyped memory (e.g., [`UFrame`]). Behind the scenes, `VmReader` and `VmWriter`
10//! must be constructed via their [`from_user_space`] and [`from_kernel_space`] methods, whose
11//! safety depends on whether the given memory regions are _valid_ or not.
12//!
13//! [`UFrame`]: crate::mm::UFrame
14//! [`from_user_space`]: `VmReader::from_user_space`
15//! [`from_kernel_space`]: `VmReader::from_kernel_space`
16//!
17//! Here is a list of conditions for memory regions to be considered valid:
18//!
19//! - The memory region as a whole must be either typed or untyped memory, not both typed and
20//! untyped.
21//!
22//! - If the memory region is typed, we require that:
23//! - the [validity requirements] from the official Rust documentation must be met, and
24//! - the type of the memory region (which must exist since the memory is typed) must be
25//! plain-old-data, so that the writer can fill it with arbitrary data safely.
26//!
27//! [validity requirements]: core::ptr#safety
28//!
29//! - If the memory region is untyped, we require that:
30//! - the underlying pages must remain alive while the validity requirements are in effect, and
31//! - the kernel must access the memory region using only the APIs provided in this module, but
32//! external accesses from hardware devices or user programs do not count.
33//!
34//! We have the last requirement for untyped memory to be valid because the safety interaction with
35//! other ways to access the memory region (e.g., atomic/volatile memory loads/stores) is not
//! currently specified. This may be relaxed in the future, if appropriate and necessary.
37//!
38//! Note that data races on untyped memory are explicitly allowed (since pages can be mapped to
39//! user space, making it impossible to avoid data races). However, they may produce erroneous
40//! results, such as unexpected bytes being copied, but do not cause soundness problems.
41use crate::arch::mm::{__memcpy_fallible, __memset_fallible};
42use crate::specs::arch::PAGE_SIZE;
43use core::marker::PhantomData;
44use core::ops::Range;
45use vstd::pervasive::{arbitrary, proof_from_false};
46use vstd::prelude::*;
47use vstd::simple_pptr::*;
48use vstd::std_specs::convert::TryFromSpecImpl;
49use vstd_extra::array_ptr::ArrayPtr;
50use vstd_extra::array_ptr::PointsToArray;
51use vstd_extra::ownership::Inv;
52
53use crate::error::*;
54use crate::mm::kspace::{KERNEL_BASE_VADDR, KERNEL_END_VADDR};
55use crate::mm::pod::{Pod, PodOnce};
56use crate::specs::arch::MAX_USERSPACE_VADDR;
57use crate::specs::mm::virt_mem_newer::{MemView, VirtPtr};
58
/// Copies `len` bytes from `src` to `dst`, stopping early on page fault.
/// Returns the number of bytes successfully copied.
///
/// # Safety
/// - `src` must be valid for reads of `len` bytes.
/// - `dst` must either be valid for writes of `len` bytes or be in user space.
unsafe fn memcpy_fallible(dst: *mut u8, src: *const u8, len: usize) -> usize {
    // SAFETY: The caller upholds the read/write validity requirements above.
    // `__memcpy_fallible` reports the number of bytes it failed to copy, so the
    // successfully-copied count is the difference from `len`.
    let failed_bytes = unsafe { __memcpy_fallible(dst, src, len) };
    len - failed_bytes
}
69
/// Fills `len` bytes of memory at `dst` with the specified `value`, stopping
/// early on page fault. Returns the number of bytes successfully set.
///
/// # Safety
/// - `dst` must either be valid for writes of `len` bytes or be in user space.
unsafe fn memset_fallible(dst: *mut u8, value: u8, len: usize) -> usize {
    // SAFETY: The caller upholds the write-validity requirement above.
    // `__memset_fallible` reports the number of bytes it failed to set.
    let failed_bytes = unsafe { __memset_fallible(dst, value, len) };
    len - failed_bytes
}
79
80verus! {
81
/// Marker type indicating that VM I/O operations may fail (e.g., user-space access).
///
/// Used as the `Fallibility` type parameter of [`VmReader`] and [`VmWriter`].
pub struct Fallible {}
84
/// Marker type indicating that VM I/O operations cannot fail (e.g., kernel-space access).
///
/// Used as the (default) `Fallibility` type parameter of [`VmReader`] and [`VmWriter`].
pub struct Infallible {}
87
/// Performs a fallible transfer from `reader` to `writer`.
///
/// On success, returns the number of bytes copied. On failure, returns the
/// error together with the number of bytes copied before the failure occurred.
///
/// # Verified Properties
/// ## Preconditions
/// - `reader`, `writer`, and their associated owners must satisfy their invariants.
/// - Each owner must match the corresponding reader or writer.
/// - Both owners must be marked fallible.
/// ## Postconditions
/// - The reader, writer, and both owners still satisfy their invariants.
/// - The owner parameters tracked by [`VmIoOwner::params_eq`] are preserved for both sides.
#[verus_spec(r =>
    with
        Tracked(owner_r): Tracked<&mut VmIoOwner<'_>>,
        Tracked(owner_w): Tracked<&mut VmIoOwner<'_>>,
    requires
        old(reader).inv(),
        old(writer).inv(),
        old(owner_r).inv(),
        old(owner_w).inv(),
        old(reader).wf(*old(owner_r)),
        old(writer).wf(*old(owner_w)),
        old(owner_r).is_fallible && old(owner_w).is_fallible, // both fallible
    ensures
        final(reader).inv(),
        final(writer).inv(),
        final(owner_r).inv(),
        final(owner_w).inv(),
        final(reader).wf(*final(owner_r)),
        final(writer).wf(*final(owner_w)),
        final(owner_r).params_eq(*old(owner_r)),
        final(owner_w).params_eq(*old(owner_w)),
)]
pub fn rw_fallible(reader: &mut VmReader<'_>, writer: &mut VmWriter<'_>) -> core::result::Result<
    usize,
    (Error, usize),
> {
    // TODO(review): placeholder implementation — no bytes are actually
    // transferred yet; the function unconditionally reports zero bytes copied.
    Ok(0) // placeholder.

}
127
/// Copies `len` bytes from `src` to `dst`.
///
/// Both `src` and `dst` are kernel virtual addresses (see the `requires` clause);
/// the copy itself cannot fault.
///
/// # Safety
///
/// - `src` must be [valid] for reads of `len` bytes.
/// - `dst` must be [valid] for writes of `len` bytes.
///
/// [valid]: crate::mm::io#safety
#[inline]
#[verifier::external_body]
#[verus_spec(
    requires
        KERNEL_BASE_VADDR <= dst && dst + len <= KERNEL_END_VADDR &&
        KERNEL_BASE_VADDR <= src && src + len <= KERNEL_END_VADDR
)]
unsafe fn memcpy(dst: usize, src: usize, len: usize) {
    // This method is implemented by calling `volatile_copy_memory`. Note that even with the
    // "volatile" keyword, data races are still considered undefined behavior (UB) in both the Rust
    // documentation and the C/C++ standards. In general, UB makes the behavior of the entire
    // program unpredictable, usually due to compiler optimizations that assume the absence of UB.
    // However, in this particular case, considering that the Linux kernel uses the "volatile"
    // keyword to implement `READ_ONCE`/`WRITE_ONCE`, the compiler is extremely unlikely to
    // break our code unless it also breaks the Linux kernel.
    //
    // For more details and future possibilities, see
    // <https://github.com/asterinas/asterinas/pull/1001#discussion_r1667317406>.
    // SAFETY: The safety is guaranteed by the safety preconditions and the explanation above.
    core::intrinsics::volatile_copy_memory(dst as *mut u8, src as *const u8, len);
}
157
158/// Reads a `Pod` value directly from a verified memory view.
159///
160/// # Verified Properties
161/// ## Preconditions
162/// - `ptr` must satisfy [`VirtPtr::inv`].
163/// - The readable range `[ptr.vaddr, ptr.vaddr + size_of::<T>())` must fit in `ptr.range@`.
164/// - Every byte in that range must translate in `mem` and be initialized.
165/// ## Postconditions
166/// - The function returns a `T` value reconstructed from the bytes at `ptr`.
167/// ## Safety
168/// - This function is trusted because Verus cannot currently express the raw-pointer read needed
169/// to reconstruct a typed `Pod` value from verified byte-level memory permissions.
170#[verifier::external_body]
171#[verus_spec(
172 with
173 Tracked(mem): Tracked<&MemView>,
174 requires
175 ptr.inv(),
176 core::mem::size_of::<T>() <= ptr.range@.end - ptr.vaddr,
177 forall|i: usize|
178 #![trigger mem.addr_transl(i)]
179 ptr.vaddr <= i < ptr.vaddr + core::mem::size_of::<T>() ==> {
180 &&& mem.addr_transl(i) is Some
181 &&& mem.memory.contains_key(mem.addr_transl(i).unwrap().0)
182 &&& mem.memory[mem.addr_transl(i).unwrap().0].contents[mem.addr_transl(i).unwrap().1 as int] is Init
183 },
184)]
185fn read_pod_from_view<T: Pod>(ptr: VirtPtr) -> T {
186 unsafe { (ptr.vaddr as *const T).read() }
187}
188
189/// Reads a `PodOnce` value using one volatile memory load from a verified memory view.
190///
191/// # Verified Properties
192/// ## Preconditions
193/// - `ptr` must satisfy [`VirtPtr::inv`].
194/// - The readable range `[ptr.vaddr, ptr.vaddr + size_of::<T>())` must fit in `ptr.range@`.
195/// - `ptr.vaddr` must satisfy the alignment requirement of `T`.
196/// - Every byte in that range must translate in `mem` and be initialized.
197/// ## Postconditions
198/// - The function returns a `T` value read from `ptr` using one volatile load.
199/// ## Safety
200/// - This function is trusted because the underlying volatile typed read relies on raw-pointer
201/// operations that Verus does not yet model directly.
202#[verifier::external_body]
203#[verus_spec(
204 with
205 Tracked(mem): Tracked<&MemView>,
206 requires
207 ptr.inv(),
208 core::mem::size_of::<T>() <= ptr.range@.end - ptr.vaddr,
209 ptr.vaddr % core::mem::align_of::<T>() == 0,
210 forall|i: usize|
211 #![trigger mem.addr_transl(i)]
212 ptr.vaddr <= i < ptr.vaddr + core::mem::size_of::<T>() ==> {
213 &&& mem.addr_transl(i) is Some
214 &&& mem.memory.contains_key(mem.addr_transl(i).unwrap().0)
215 &&& mem.memory[mem.addr_transl(i).unwrap().0].contents[mem.addr_transl(i).unwrap().1 as int] is Init
216 },
217)]
218fn read_once_from_view<T: PodOnce>(ptr: VirtPtr) -> T {
219 let pnt = ptr.vaddr as *const T;
220 unsafe { pnt.read_volatile() }
221}
222
/// [`VmReader`] is a reader for reading data from a contiguous range of memory.
///
/// The memory range read by [`VmReader`] can be in either kernel space or user space.
/// When the operating range is in kernel space, the memory within that range
/// is guaranteed to be valid, and the corresponding memory reads are infallible.
/// When the operating range is in user space, it is ensured that the page table of
/// the process creating the [`VmReader`] is active for the duration of `'a`,
/// and the corresponding memory reads are considered fallible.
///
/// When performing a read together with a [`VmWriter`], if one of them represents
/// typed memory, it can ensure that the reading range in this reader and the
/// writing range in the writer do not overlap.
///
/// NOTE: The overlap mentioned above is at both the virtual address level
/// and the physical address level. There is no guarantee for the operation results
/// of [`VmReader`] and [`VmWriter`] in overlapping untyped addresses, and it is
/// the user's responsibility to handle this situation.
pub struct VmReader<'a, Fallibility = Infallible> {
    /// Ghost identifier tying this reader to its associated [`VmIoOwner`].
    pub id: Ghost<nat>,
    /// The current read position within the range.
    pub cursor: VirtPtr,
    /// One-past-the-end position of the readable range.
    pub end: VirtPtr,
    /// Carries the lifetime `'a` and the `Fallibility` marker without storing data.
    pub phantom: PhantomData<(&'a [u8], Fallibility)>,
}
246
/// The memory view used for VM I/O operations.
///
/// The readers can think of this as a wrapped permission token for operating with a certain
/// memory view (see [`MemView`]) "owned" by the [`VmIoOwner`] that they are created from, which
/// is used for allowing callers to use [`VmReader`] and [`VmWriter`] to perform VM I/O operations.
///
/// For writers the memory permission must be exclusive, so this enum contains an owned [`MemView`]
/// for the write view; for readers the memory permission can be shared, so this enum contains a
/// reference to a [`MemView`] for the read view (which can be disabled optionally in [`VmSpaceOwner`]).
///
/// ⚠️ WARNING: We do not recommend using this enum directly.
///
/// [`VmSpaceOwner`]: crate::mm::vm_space::VmSpaceOwner
pub tracked enum VmIoMemView<'a> {
    /// An owned memory view for writing (exclusive permission).
    WriteView(MemView),
    /// A shared memory view for reading (shared permission).
    ReadView(&'a MemView),
}
266
/// The owner of a VM I/O operation, which tracks the memory range and the memory view for the
/// operation.
///
/// Basically the caller should be only interested in this struct when using [`VmReader`] and
/// [`VmWriter`] to perform VM I/O operations, since the safety of these operations depends on the
/// validity of the memory range and memory view tracked by this struct.
pub tracked struct VmIoOwner<'a> {
    /// The unique identifier of this owner.
    pub id: Ghost<nat>,
    /// The virtual address range owned by this owner.
    pub range: Ghost<Range<usize>>,
    /// Whether this reader is fallible.
    pub is_fallible: bool,
    /// Carries the lifetime `'a`; no data is stored.
    pub phantom: PhantomData<&'a [u8]>,
    /// Whether this owner is for kernel space.
    pub is_kernel: bool,
    /// The mem view associated with this owner.
    pub mem_view: Option<VmIoMemView<'a>>,
}
286
impl VmIoOwner<'_> {
    /// Structural well-formedness: the range is ordered (start does not exceed end).
    /// Always holds after construction.
    pub open spec fn inv_wf(self) -> bool {
        self.range@.start <= self.range@.end
    }
}
294
impl Inv for VmIoOwner<'_> {
    /// Full invariant: well-formed AND memory view (if present) covers the range.
    ///
    /// For either kind of view, the view's mappings must be finite and pairwise
    /// disjoint, and every virtual address in `self.range@` must translate
    /// successfully. An owner without a view only needs structural well-formedness.
    open spec fn inv(self) -> bool {
        &&& self.inv_wf()
        &&& match self.mem_view {
            Some(VmIoMemView::WriteView(mv)) => {
                &&& mv.mappings.finite()
                &&& mv.mappings_are_disjoint()
                &&& forall|va: usize|
                    self.range@.start <= va < self.range@.end ==> {
                        &&& #[trigger] mv.addr_transl(va) is Some
                    }
            },
            Some(VmIoMemView::ReadView(mv)) => {
                &&& mv.mappings.finite()
                &&& mv.mappings_are_disjoint()
                &&& forall|va: usize|
                    self.range@.start <= va < self.range@.end ==> {
                        &&& #[trigger] mv.addr_transl(va) is Some
                    }
            },
            None => true,
        }
    }
}
320
impl VmIoOwner<'_> {
    /// Whether this owner carries a read memory view.
    #[verifier::inline]
    pub open spec fn has_read_view(self) -> bool {
        self.mem_view matches Some(VmIoMemView::ReadView(_))
    }

    /// Whether this owner carries a write memory view.
    #[verifier::inline]
    pub open spec fn has_write_view(self) -> bool {
        self.mem_view matches Some(VmIoMemView::WriteView(_))
    }

    /// Whether this owner is ready to serve as the readable side of a copy.
    #[verifier::inline]
    pub open spec fn can_read(self) -> bool {
        self.has_read_view()
    }

    /// Whether this owner is ready to serve as the writable side of a copy.
    #[verifier::inline]
    pub open spec fn can_write(self) -> bool {
        self.has_write_view()
    }

    /// Whether the read view initializes the entire owner range.
    ///
    /// Holds only when a `ReadView` is present and every byte in `self.range@`
    /// translates to a mapped, initialized byte of the view's memory.
    #[verifier::inline]
    pub open spec fn read_view_initialized(self) -> bool {
        match self.mem_view {
            Some(VmIoMemView::ReadView(mem_src)) => {
                forall|i: usize|
                    #![trigger mem_src.addr_transl(i)]
                    self.range@.start <= i < self.range@.end ==> {
                        &&& mem_src.addr_transl(i) is Some
                        &&& mem_src.memory.contains_key(mem_src.addr_transl(i).unwrap().0)
                        &&& mem_src.memory[mem_src.addr_transl(
                            i,
                        ).unwrap().0].contents[mem_src.addr_transl(i).unwrap().1 as int] is Init
                    }
            },
            _ => false,
        }
    }

    /// Checks whether this owner overlaps with another owner.
    #[verifier::inline]
    pub open spec fn overlaps(self, other: VmIoOwner<'_>) -> bool {
        !self.disjoint(other)
    }

    /// Whether this owner's range touches the given half-open range.
    ///
    /// NOTE(review): the comparisons use `<=`, so two ranges that are merely
    /// adjacent (e.g., `0..4` and `4..8`) — and even empty ranges at the
    /// boundary — are reported as overlapping. This is a conservative
    /// over-approximation of overlap for half-open ranges; confirm it is
    /// intended before relying on `disjoint` for adjacency.
    #[verifier::inline]
    pub open spec fn overlaps_with_range(self, range: Range<usize>) -> bool {
        &&& self.range@.start <= range.end
        &&& range.start <= self.range@.end
    }

    /// Checks whether this owner is disjoint with another owner.
    ///
    /// Two owners are disjoint when their address ranges do not overlap and,
    /// if both carry memory views, the views' mappings are disjoint as well.
    #[verifier::inline]
    pub open spec fn disjoint(self, other: VmIoOwner<'_>) -> bool {
        &&& !self.overlaps_with_range(other.range@)
        &&& match (self.mem_view, other.mem_view) {
            (Some(lhs), Some(rhs)) => match (lhs, rhs) {
                (VmIoMemView::WriteView(lmv), VmIoMemView::WriteView(rmv)) => {
                    lmv.mappings.disjoint(rmv.mappings)
                },
                (VmIoMemView::WriteView(lmv), VmIoMemView::ReadView(rmv)) => {
                    lmv.mappings.disjoint(rmv.mappings)
                },
                (VmIoMemView::ReadView(lmv), VmIoMemView::WriteView(rmv)) => {
                    lmv.mappings.disjoint(rmv.mappings)
                },
                (VmIoMemView::ReadView(lmv), VmIoMemView::ReadView(rmv)) => {
                    lmv.mappings.disjoint(rmv.mappings)
                },
            },
            _ => true,
        }
    }

    /// Whether two owners agree on the parameters preserved across VM I/O
    /// operations: the address range and the fallibility flag.
    #[verifier::inline]
    pub open spec fn params_eq(self, other: VmIoOwner<'_>) -> bool {
        &&& self.range@ == other.range@
        &&& self.is_fallible == other.is_fallible
    }

    /// Changes the fallibility of this owner.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The owner must satisfy its invariant.
    /// - The new fallibility must differ from the current one.
    /// ## Postconditions
    /// - The updated owner still satisfies its invariant.
    /// - The `is_fallible` field is updated to `fallible`.
    pub proof fn change_fallible(tracked &mut self, tracked fallible: bool)
        requires
            old(self).inv(),
            old(self).is_fallible != fallible,
        ensures
            final(self).inv(),
            final(self).is_fallible == fallible,
    {
        self.is_fallible = fallible;
    }

    /// Advances the cursor of the reader/writer by the given number of bytes.
    ///
    /// Note this will return the advanced `VmIoMemView` as the previous permission
    /// is no longer needed and must be discarded then.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The owner must satisfy its invariant.
    /// - The owner must carry a memory view.
    /// - `nbytes` must not exceed the remaining length of the owned range.
    /// ## Postconditions
    /// - The updated owner still satisfies its invariant.
    /// - The start of the owned range is advanced by `nbytes`.
    /// - The end of the owned range and the other owner parameters are preserved.
    /// - The kind of memory view (read/write) is preserved, as is range-wide
    ///   initialization of a read view.
    /// - The returned [`VmIoMemView`] is the portion advanced past.
    #[verifier::external_body]
    pub proof fn advance(tracked &mut self, nbytes: usize) -> (tracked res: VmIoMemView<'_>)
        requires
            old(self).inv(),
            old(self).mem_view is Some,
            nbytes <= old(self).range@.end - old(self).range@.start,
        ensures
            final(self).inv(),
            final(self).range@.start == old(self).range@.start + nbytes,
            final(self).range@.end == old(self).range@.end,
            final(self).is_fallible == old(self).is_fallible,
            final(self).id == old(self).id,
            final(self).is_kernel == old(self).is_kernel,
            old(self).mem_view matches Some(VmIoMemView::ReadView(_)) ==> final(self).mem_view matches Some(VmIoMemView::ReadView(_)),
            old(self).mem_view matches Some(VmIoMemView::WriteView(_)) ==> final(self).mem_view matches Some(VmIoMemView::WriteView(_)),
            old(self).read_view_initialized() ==> final(self).read_view_initialized(),
    {
        // Trusted (external_body): the ghost result is conjured; the ensures
        // clause above is the contract callers rely on.
        arbitrary()
    }

    /// Unwraps the read view.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The owner must satisfy its invariant.
    /// - The owner must carry a read memory view.
    /// ## Postconditions
    /// - The returned reference is exactly the read view stored in `self.mem_view`.
    pub proof fn tracked_read_view_unwrap(tracked &self) -> (tracked r: &MemView)
        requires
            self.inv(),
            self.mem_view matches Some(VmIoMemView::ReadView(_)),
        ensures
            VmIoMemView::ReadView(r) == self.mem_view.unwrap(),
    {
        match &self.mem_view {
            Some(VmIoMemView::ReadView(r)) => r,
            // Unreachable: the precondition rules out every other variant.
            _ => { proof_from_false() },
        }
    }
}
482
/// [`VmWriter`] is a writer for writing data to a contiguous range of memory.
///
/// The memory range written by [`VmWriter`] can be in either kernel space or user space.
/// When the operating range is in kernel space, the memory within that range
/// is guaranteed to be valid, and the corresponding memory writes are infallible.
/// When the operating range is in user space, it is ensured that the page table of
/// the process creating the [`VmWriter`] is active for the duration of `'a`,
/// and the corresponding memory writes are considered fallible.
///
/// When performing a write together with a [`VmReader`], if one of them represents
/// typed memory, it can ensure that the writing range in this writer and the
/// reading range in the reader do not overlap.
///
/// NOTE: The overlap mentioned above is at both the virtual address level
/// and the physical address level. There is no guarantee for the operation results
/// of [`VmReader`] and [`VmWriter`] in overlapping untyped addresses, and it is
/// the user's responsibility to handle this situation.
pub struct VmWriter<'a, Fallibility = Infallible> {
    /// Ghost identifier tying this writer to its associated [`VmIoOwner`].
    pub id: Ghost<nat>,
    /// The current write position within the range.
    pub cursor: VirtPtr,
    /// One-past-the-end position of the writable range.
    pub end: VirtPtr,
    /// Carries the lifetime `'a` and the `Fallibility` marker without storing data.
    pub phantom: PhantomData<(&'a [u8], Fallibility)>,
}
506
#[verus_verify]
impl<'a> VmWriter<'a, Infallible> {
    /// Constructs a [`VmWriter`] from a pointer and a length, which represents
    /// a memory range in kernel space.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The memory region represented by `ptr` and `len` must be valid for writes of `len` bytes
    ///   during the entire lifetime `a`. This means that the underlying pages must remain alive,
    ///   and the kernel must access the memory region using only the APIs provided in this module.
    /// - The range `ptr.vaddr..ptr.vaddr + len` must represent a kernel space memory range.
    /// ## Postconditions
    /// - An infallible [`VmWriter`] will be created with the range `ptr.vaddr..ptr.vaddr + len`.
    /// - The created [`VmWriter`] will have a unique identifier `id`, and its cursor will be
    ///   initialized to `ptr`.
    /// - The created [`VmWriter`] will be associated with a [`VmIoOwner`] that has the same `id`, the
    ///   same memory range, and is marked as kernel space and infallible.
    /// - The memory view of the associated [`VmIoOwner`] will be `None`, indicating that it does not
    ///   have any specific permissions yet.
    /// ## Safety
    ///
    /// `ptr` must be [valid] for writes of `len` bytes during the entire lifetime `a`.
    ///
    /// [valid]: crate::mm::io#safety
    #[verus_spec(r =>
        with
            Ghost(id): Ghost<nat>,
            Tracked(fallible): Tracked<bool>,
            -> owner: Tracked<VmIoOwner<'a>>,
        requires
            !fallible,
            ptr.inv(),
            ptr.range@.start == ptr.vaddr,
            len == ptr.range@.end - ptr.range@.start,
            len == 0 || KERNEL_BASE_VADDR <= ptr.vaddr,
            len == 0 || ptr.vaddr + len <= KERNEL_END_VADDR,
        ensures
            owner@.inv(),
            r.wf(owner@),
            owner@.is_fallible == fallible,
            owner@.id == id,
            owner@.is_kernel,
            owner@.mem_view is None,
            r.cursor == ptr,
            r.end.vaddr == ptr.vaddr + len,
            r.cursor.range@ == ptr.range@,
            r.end.range@ == ptr.range@,
    )]
    pub unsafe fn from_kernel_space(ptr: VirtPtr, len: usize) -> Self {
        // The owner mirrors `ptr.range@`; no memory view is attached yet.
        let ghost range = ptr.range@;
        let tracked owner = VmIoOwner {
            id: Ghost(id),
            range: Ghost(range),
            is_fallible: fallible,
            phantom: PhantomData,
            is_kernel: true,
            mem_view: None,
        };

        // The end pointer is one past the last writable byte; by the `requires`
        // clause this range coincides with `ptr.range@`.
        let ghost range = ptr.vaddr..(ptr.vaddr + len) as usize;
        let end = VirtPtr { vaddr: ptr.vaddr + len, range: Ghost(range) };

        proof_with!(|= Tracked(owner));
        Self { id: Ghost(id), cursor: ptr, end, phantom: PhantomData }
    }

    /// Converts a PoD value into a [`VmWriter`] that writes to the memory occupied by the PoD value.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// None as Rust's type system guarantees that if `T` is valid, then we can always create a
    /// valid `VmWriter` for it.
    /// ## Postconditions
    /// - If the memory region occupied by `val` is valid for writes, then an infallible [`VmWriter`]
    ///   will be created that points to the memory region of `val`.
    /// - If the memory region occupied by `val` is not valid for writes, then an [`IoError`] will be
    ///   returned.
    ///
    /// [`IoError`]: `Error::IoError`
    #[verus_spec(r =>
        with
            Ghost(id): Ghost<nat>,
            -> owner: Tracked<Result<VmIoOwner<'a>>>,
        ensures
            r is Ok <==> owner@ is Ok,
            r matches Ok(r) ==> {
                &&& owner@ matches Ok(owner) ==> {
                    &&& r.inv()
                    &&& r.avail_spec() == core::mem::size_of::<T>()
                    &&& owner.inv()
                    &&& r.wf(owner)
                }
            }
    )]
    pub fn from_pod<T: Pod>(mut val: T) -> Result<VmWriter<'a, Infallible>> {
        proof_decl! {
            let tracked mut perm;
        }

        let bytes = val.as_bytes_mut();
        let pnt = bytes.as_mut_ptr() as usize;
        let len = core::mem::size_of::<T>();

        // Reject addresses outside the kernel range (and null). The
        // `len >= KERNEL_END_VADDR` guard ensures `KERNEL_END_VADDR - len`
        // below cannot underflow.
        if len != 0 && (pnt < KERNEL_BASE_VADDR || len >= KERNEL_END_VADDR || pnt > KERNEL_END_VADDR
            - len) || pnt == 0 {
            proof_with!(|= Tracked(Err(Error::IoError)));
            Err(Error::IoError)
        } else {
            let ghost range = pnt..(pnt + len) as usize;
            let vptr = VirtPtr { vaddr: pnt as usize, range: Ghost(range) };
            // SAFETY: `val`'s bytes are a valid kernel-space region of `len`
            // bytes for the duration of this call (checked above).
            let r = unsafe {
                #[verus_spec(with Ghost(id), Tracked(false) => Tracked(perm))]
                VmWriter::from_kernel_space(vptr, len)
            };

            proof_with!(|= Tracked(Ok(perm)));
            Ok(r)
        }
    }

    /// Converts an infallible writer into a fallible one.
    pub fn to_fallible(self) -> VmWriter<'a, Fallible> {
        // Only the `Fallibility` marker changes; the cursor/end state carries over.
        VmWriter { id: self.id, cursor: self.cursor, end: self.end, phantom: PhantomData }
    }

    /// Constructs a `VmWriter<'a, Infallible>` from a mutable byte slice.
    #[verifier::external_body]
    pub fn from_slice_mut(slice: &'a mut [u8]) -> Self {
        let addr = slice.as_mut_ptr() as usize;
        let end_addr = addr + slice.len();
        VmWriter {
            id: Ghost(arbitrary()),
            cursor: VirtPtr { vaddr: addr, range: Ghost(addr..end_addr) },
            end: VirtPtr { vaddr: end_addr, range: Ghost(addr..end_addr) },
            phantom: PhantomData,
        }
    }
}
645
impl<Fallibility> Clone for VmReader<'_, Fallibility> {
    /// [`Clone`] can be implemented for [`VmReader`]
    /// because it either points to untyped memory or represents immutable references.
    ///
    /// Note that we cannot implement [`Clone`] for [`VmWriter`]
    /// because it can represent mutable references, which must remain exclusive.
    fn clone(&self) -> Self {
        // Duplicates the ghost id and both cursor pointers; no ownership of any
        // underlying memory view is duplicated here.
        Self { id: self.id, cursor: self.cursor, end: self.end, phantom: PhantomData }
    }
}
656
657#[verus_verify]
658impl<'a> VmReader<'a, Infallible> {
659 /// Constructs a [`VmReader`] from a pointer and a length, which represents
660 /// a memory range in USER space.
661 ///
662 /// # Verified Properties
663 /// ## Preconditions
664 /// - `ptr` must satisfy [`VirtPtr::inv`].
665 /// ## Postconditions
666 /// - The returned [`VmReader`] satisfies its invariant.
667 /// - The returned reader is associated with a [`VmIoOwner`] that satisfies both [`VmIoOwner::inv`]
668 /// and [`VmReader::wf`].
669 /// - The owner has the same range as `ptr`, has no memory view yet, and is marked as user-space
670 /// and infallible.
671 #[verus_spec(r =>
672 with
673 Ghost(id): Ghost<nat>,
674 -> owner: Tracked<VmIoOwner<'a>>,
675 ensures
676 r.inv_wf(),
677 owner@.id == id,
678 owner@.range == ptr.range@,
679 owner@.mem_view is None,
680 !owner@.is_kernel,
681 !owner@.is_fallible,
682 r.cursor == ptr,
683 ptr.vaddr + len <= usize::MAX ==> r.end.vaddr == ptr.vaddr + len,
684 r.end.range@ == ptr.range@,
685 ptr.inv() && ptr.range@.start == ptr.vaddr
686 && len == ptr.range@.end - ptr.range@.start ==> {
687 &&& r.inv()
688 &&& owner@.inv()
689 &&& r.wf(owner@)
690 },
691 )]
692 pub unsafe fn from_user_space(ptr: VirtPtr, len: usize) -> Self {
693 let ghost range = ptr.range@;
694 let tracked owner = VmIoOwner {
695 id: Ghost(id),
696 range: Ghost(range),
697 is_fallible: false,
698 phantom: PhantomData,
699 is_kernel: false,
700 mem_view: None,
701 };
702 let end_vaddr = if ptr.vaddr <= usize::MAX - len {
703 ptr.vaddr + len
704 } else {
705 0
706 };
707 let end = VirtPtr { vaddr: end_vaddr, range: Ghost(range) };
708 proof_with!(|= Tracked(owner));
709 Self { id: Ghost(id), cursor: ptr, end, phantom: PhantomData }
710 }
711
712 /// Constructs a [`VmReader`] from a pointer and a length, which represents
713 /// a memory range in kernel space.
714 ///
715 /// # Verified Properties
716 /// ## Preconditions
717 /// - The memory region represented by `ptr` and `len` must be valid for reads of `len` bytes
718 /// during the entire lifetime `a`. This means that the underlying pages must remain alive,
719 /// and the kernel must access the memory region using only the APIs provided in this module.
720 /// - The range `ptr.vaddr..ptr.vaddr + len` must represent a kernel space memory range.
721 /// ## Postconditions
722 /// - An infallible [`VmReader`] will be created with the range `ptr.vaddr..ptr.vaddr + len`.
723 /// - The created [`VmReader`] will have a unique identifier `id`, and its cursor will be
724 /// initialized to `ptr`.
725 /// - The created [`VmReader`] will be associated with a [`VmIoOwner`] that has the same `id`,
726 /// the same memory range, and is marked as kernel space and infallible.
727 /// ## Safety
728 ///
729 /// `ptr` must be [valid] for reads of `len` bytes during the entire lifetime `a`.
730 ///
731 /// [valid]: crate::mm::io#safety
732 #[verus_spec(r =>
733 with
734 Ghost(id): Ghost<nat>,
735 -> owner: Tracked<VmIoOwner<'a>>,
736 requires
737 ptr.inv(),
738 ptr.range@.start == ptr.vaddr,
739 len == ptr.range@.end - ptr.range@.start,
740 len == 0 || KERNEL_BASE_VADDR <= ptr.vaddr,
741 len == 0 || ptr.vaddr + len <= KERNEL_END_VADDR,
742 ensures
743 owner@.inv(),
744 r.wf(owner@),
745 r.cursor.vaddr == ptr.vaddr,
746 r.end.vaddr == ptr.vaddr + len,
747 r.cursor == ptr,
748 r.end.range@ == ptr.range@,
749 owner@.is_kernel,
750 owner@.id == id,
751 )]
752 pub unsafe fn from_kernel_space(ptr: VirtPtr, len: usize) -> Self {
753 let tracked owner = VmIoOwner {
754 id: Ghost(id),
755 range: Ghost(ptr.vaddr..(ptr.vaddr + len) as usize),
756 is_fallible: false,
757 phantom: PhantomData,
758 is_kernel: true,
759 // This is left empty as will be populated later.
760 mem_view: None,
761 };
762
763 let ghost range = ptr.vaddr..(ptr.vaddr + len) as usize;
764 let end = VirtPtr { vaddr: ptr.vaddr + len, range: Ghost(range) };
765
766 proof_with!(|= Tracked(owner));
767 Self { id: Ghost(id), cursor: ptr, end, phantom: PhantomData }
768 }
769
    /// Converts a PoD value into a [`VmReader`] that reads from the memory occupied by the PoD value.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// None as the Rust's type system guarantees that if `&mut T` is valid, then we can always
    /// create a valid [`VmReader`] for it.
    /// ## Postconditions
    /// - If the memory region occupied by `val` is valid for reads, then an infallible [`VmReader`]
    ///   will be created that points to the memory region of `val`.
    /// - If the memory region occupied by `val` is not valid for reads, then an [`IoError`] will be
    ///   returned.
    ///
    /// [`IoError`]: `Error::IoError`
    #[verus_spec(r =>
        with
            Ghost(id): Ghost<nat>,
            -> owner: Tracked<Result<VmIoOwner<'a>>>,
        ensures
            r is Ok <==> owner@ is Ok,
            r matches Ok(r) ==> {
                &&& owner@ matches Ok(owner) ==> {
                    &&& r.inv()
                    &&& r.remain_spec() == core::mem::size_of::<T>()
                    &&& owner.inv()
                    &&& r.wf(owner)
                }
            }
    )]
    pub fn from_pod<T: Pod>(val: &mut T) -> Result<VmReader<'a, Infallible>> {
        proof_decl! {
            // Will receive the ownership token produced by `from_kernel_space`.
            let tracked mut perm;
        }

        let bytes = val.as_bytes_mut();
        let pnt = bytes.as_mut_ptr() as usize;
        let len = core::mem::size_of::<T>();

        // Reject the region unless it lies entirely within the kernel address
        // space (`KERNEL_BASE_VADDR..KERNEL_END_VADDR`) and is non-null. The
        // `pnt > KERNEL_END_VADDR - len` form avoids overflow in `pnt + len`.
        // A zero-sized `T` (`len == 0`) is accepted as long as `pnt != 0`.
        if len != 0 && (pnt < KERNEL_BASE_VADDR || len >= KERNEL_END_VADDR || pnt > KERNEL_END_VADDR
            - len) || pnt == 0 {
            proof_with!(|= Tracked(Err(Error::IoError)));
            Err(Error::IoError)
        } else {
            let ghost range = pnt..(pnt + len) as usize;
            let vptr = VirtPtr { vaddr: pnt, range: Ghost(range) };
            // SAFETY: `vptr` is derived from a `&mut T` whose pointee outlives
            // `'a`, the region is typed plain-old-data memory, and the bounds
            // check above confirmed it lies inside the kernel address space.
            let r = unsafe {
                #[verus_spec(with Ghost(id) => Tracked(perm))]
                VmReader::from_kernel_space(vptr, len)
            };

            proof {
                assert(r.inv());
                assert(perm.inv());
            }

            proof_with!(|= Tracked(Ok(perm)));
            Ok(r)
        }
    }
828
    /// Constructs a `VmReader<'a, Infallible>` from a shared byte slice.
    ///
    /// The reader covers exactly the bytes of `slice`; the slice's lifetime `'a`
    /// keeps the underlying memory valid for as long as the reader exists.
    ///
    /// NOTE(review): this constructor is `external_body` (trusted, unverified)
    /// and assigns an arbitrary ghost identity, so no ownership token is
    /// produced for readers built this way.
    #[verifier::external_body]
    pub fn from_slice(slice: &'a [u8]) -> Self {
        let addr = slice.as_ptr() as usize;
        let end_addr = addr + slice.len();
        // The cursor starts at the first byte; `end` is one past the last
        // readable byte. Both carry the same ghost address range.
        VmReader {
            id: Ghost(arbitrary()),
            cursor: VirtPtr { vaddr: addr, range: Ghost(addr..end_addr) },
            end: VirtPtr { vaddr: end_addr, range: Ghost(addr..end_addr) },
            phantom: PhantomData,
        }
    }
841
    /// Constructs an infallible [`VmReader`] from a shared PoD reference.
    ///
    /// # Verified Properties
    /// ## Postconditions
    /// - The reader's remaining byte count equals `size_of::<T>()`.
    ///
    /// NOTE(review): like [`Self::from_slice`], this is `external_body`
    /// (trusted) and uses an arbitrary ghost identity.
    #[verifier::external_body]
    pub fn from_pod_ref<T: Pod>(val: &T) -> (r: Self)
        ensures
            r.remain_spec() == core::mem::size_of::<T>(),
    {
        let bytes = val.as_bytes_ref();
        let addr = bytes.as_ptr() as usize;
        let end_addr = addr + bytes.len();
        // Cursor spans exactly the bytes of `val`; `end` is one past the last byte.
        VmReader {
            id: Ghost(arbitrary()),
            cursor: VirtPtr { vaddr: addr, range: Ghost(addr..end_addr) },
            end: VirtPtr { vaddr: end_addr, range: Ghost(addr..end_addr) },
            phantom: PhantomData,
        }
    }
858
    /// Converts an infallible reader into a fallible one.
    ///
    /// # Verified Properties
    /// ## Postconditions
    /// - The number of remaining readable bytes is preserved.
    pub fn to_fallible(self) -> (r: VmReader<'a, Fallible>)
        ensures
            r.remain_spec() == self.remain_spec(),
    {
        // Only the fallibility marker in the type changes; identity, cursor,
        // and end are carried over unchanged.
        VmReader { id: self.id, cursor: self.cursor, end: self.end, phantom: PhantomData }
    }
866}
867
// TODO: Consider implementing `TryFrom` here instead of an ad-hoc constructor.
// Open question: should this trait method be discarded, given that we do not
// want to make `VmWriter` generic over a const length parameter `<N>`?
#[verus_verify]
impl<'a> TryFrom<&'a [u8]> for VmWriter<'a, Infallible> {
    type Error = Error;

    /// Attempts to build an infallible [`VmWriter`] covering the bytes of `slice`.
    ///
    /// Returns [`Error::IoError`] if the slice lies outside the kernel address
    /// space or starts at a null address.
    ///
    /// NOTE(review): the source is a *shared* `&[u8]` slice, yet the SAFETY
    /// argument below speaks of a mutable reference; writing through memory
    /// derived from a shared borrow would be unsound — confirm whether this
    /// should take `&'a mut [u8]` instead (and update `TryFromSpecImpl` to
    /// match).
    // fn try_from(slice: ArrayPtr<u8, N>, Tracked(owner))??
    #[verus_spec()]
    fn try_from(slice: &'a [u8] /* length... */ ) -> Result<Self> {
        proof_decl! {
            // Will receive the ownership token produced by `from_kernel_space`.
            let tracked mut perm;
        }

        let addr = slice.as_ptr() as usize;

        // Reject slices outside the kernel address range or at address zero;
        // the subtraction form avoids overflow in `addr + slice.len()`.
        if slice.len() != 0 && (addr < KERNEL_BASE_VADDR || slice.len() >= KERNEL_END_VADDR || addr
            > KERNEL_END_VADDR - slice.len()) || addr == 0 {
            return Err(Error::IoError);
        }

        let ghost range = addr..(addr + slice.len()) as usize;
        let vptr = VirtPtr { vaddr: addr, range: Ghost(range) };

        proof {
            assert(vptr.inv());
        }

        // SAFETY:
        // - The memory range points to typed memory.
        // - The validity requirements for write accesses are met because the pointer is converted
        //   from a mutable reference that outlives the lifetime `'a`.
        // - The type, i.e., the `u8` slice, is plain-old-data.
        Ok(
            unsafe {
                #[verus_spec(with Ghost(arbitrary()), Tracked(false) => Tracked(perm))]
                Self::from_kernel_space(vptr, slice.len())
            },
        )
    }
}
908
909impl<'a> TryFromSpecImpl<&'a [u8]> for VmWriter<'a, Infallible> {
910 open spec fn obeys_try_from_spec() -> bool {
911 true
912 }
913
914 open spec fn try_from_spec(slice: &'a [u8]) -> Result<Self> {
915 let addr = slice.as_ptr() as usize;
916 let len = slice.len();
917
918 if len != 0 && (addr < KERNEL_BASE_VADDR || len >= KERNEL_END_VADDR || addr
919 > KERNEL_END_VADDR - slice.len()) || addr == 0 {
920 Err(Error::IoError)
921 } else {
922 Ok(
923 Self {
924 id: Ghost(arbitrary()),
925 cursor: VirtPtr { vaddr: addr, range: Ghost(addr..(addr + len) as usize) },
926 end: VirtPtr {
927 vaddr: (addr + len) as usize,
928 range: Ghost(addr..(addr + len) as usize),
929 },
930 phantom: PhantomData,
931 },
932 )
933 }
934 }
935}
936
937type Result<T> = core::result::Result<T, Error>;
938
/// A trait that enables reading/writing data from/to a VM object,
/// e.g., [`USegment`], [`Vec<UFrame>`] and [`UFrame`].
///
/// # Concurrency
///
/// The methods may be executed by multiple concurrent reader and writer
/// threads. In this case, if the results of concurrent reads or writes
/// desire predictability or atomicity, the users should add extra mechanism
/// for such properties.
///
/// [`USegment`]: crate::mm::USegment
/// [`UFrame`]: crate::mm::UFrame
///
/// Note: In this trait we follow the standard of `vstd` trait that allows precondition and
/// postcondition overriding by introducing `obeys_`, `_requires`, and `_ensures` spec functions.
///
/// `P` is the type of the permission/ownership token used to track the state of the VM object.
pub trait VmIo<P: Sized>: Send + Sync + Sized {
    /// Whether this implementation opts into the trait-level specifications at all.
    spec fn obeys_vmio_spec() -> bool;

    /// Whether `read`'s precondition [`Self::read_requires`] is enforced.
    /// Defaults to `false`, i.e., implementations impose no extra precondition.
    open spec fn obeys_vmio_read_requires() -> bool
        recommends
            Self::obeys_vmio_spec(),
    {
        false
    }

    /// Whether `write`'s precondition [`Self::write_requires`] is enforced.
    /// Defaults to `false`, i.e., implementations impose no extra precondition.
    open spec fn obeys_vmio_write_requires() -> bool
        recommends
            Self::obeys_vmio_spec(),
    {
        false
    }

    /// Whether `read`'s postcondition [`Self::read_spec`] is enforced.
    spec fn obeys_vmio_read_spec() -> bool
        recommends
            Self::obeys_vmio_spec(),
    ;

    /// Whether `write`'s postcondition [`Self::write_spec`] is enforced.
    spec fn obeys_vmio_write_spec() -> bool
        recommends
            Self::obeys_vmio_spec(),
    ;

    /// Precondition of [`Self::read`]; trivially `true` unless overridden.
    open spec fn read_requires(
        self,
        offset: usize,
        writer: VmWriter<'_>,
        writer_own: VmIoOwner<'_>,
        owner: P,
    ) -> bool {
        true
    }

    /// Precondition of [`Self::write`]; trivially `true` unless overridden.
    open spec fn write_requires(
        self,
        offset: usize,
        reader: VmReader<'_>,
        reader_own: VmIoOwner<'_>,
        owner: P,
    ) -> bool {
        true
    }

    /// Postcondition of [`Self::read`], relating the pre- and post-states of
    /// the writer, its ownership token, the object's token, and the result.
    spec fn read_spec(
        self,
        offset: usize,
        old_writer: VmWriter<'_>,
        new_writer: VmWriter<'_>,
        old_writer_own: VmIoOwner<'_>,
        new_writer_own: VmIoOwner<'_>,
        old_owner: P,
        new_owner: P,
        r: Result<()>,
    ) -> bool;

    /// Postcondition of [`Self::write`].
    ///
    /// NOTE(review): the `*_writer_own` parameters here actually track the
    /// *reader's* ownership token (cf. `write_requires`'s `reader_own`);
    /// consider renaming for consistency.
    spec fn write_spec(
        self,
        offset: usize,
        old_reader: VmReader<'_>,
        new_reader: VmReader<'_>,
        old_writer_own: VmIoOwner<'_>,
        new_writer_own: VmIoOwner<'_>,
        old_owner: P,
        new_owner: P,
        r: Result<()>,
    ) -> bool;

    /// Reads requested data at a specified offset into a given [`VmWriter`].
    ///
    /// # No short reads
    ///
    /// On success, the `writer` must be written with the requested data
    /// completely. If, for any reason, the requested data is only partially
    /// available, then the method shall return an error.
    fn read(
        &self,
        offset: usize,
        writer: &mut VmWriter<'_>,
        Tracked(writer_own): Tracked<&mut VmIoOwner<'_>>,
        Tracked(owner): Tracked<&mut P>,
    ) -> (r: Result<()>)
        requires
            Self::obeys_vmio_read_requires() ==> Self::read_requires(
                *self,
                offset,
                *old(writer),
                *old(writer_own),
                *old(owner),
            ),
        ensures
            Self::obeys_vmio_read_spec() ==> Self::read_spec(
                *self,
                offset,
                *old(writer),
                *final(writer),
                *old(writer_own),
                *final(writer_own),
                *old(owner),
                *final(owner),
                r,
            ),
    ;

    /// Writes all data from a given `VmReader` at a specified offset.
    ///
    /// # No short writes
    ///
    /// On success, the data from the `reader` must be read to the VM object entirely.
    /// If, for any reason, the input data can only be written partially,
    /// then the method shall return an error.
    ///
    /// NOTE(review): the tracked parameter is named `writer_own` but it is
    /// passed where `write_requires`/`write_spec` expect the *reader's*
    /// ownership token; consider renaming to `reader_own`.
    fn write(
        &self,
        offset: usize,
        reader: &mut VmReader,
        Tracked(writer_own): Tracked<&mut VmIoOwner<'_>>,
        Tracked(owner): Tracked<&mut P>,
    ) -> (r: Result<()>)
        requires
            Self::obeys_vmio_write_requires() ==> Self::write_requires(
                *self,
                offset,
                *old(reader),
                *old(writer_own),
                *old(owner),
            ),
        ensures
            Self::obeys_vmio_write_spec() ==> Self::write_spec(
                *self,
                offset,
                *old(reader),
                *final(reader),
                *old(writer_own),
                *final(writer_own),
                *old(owner),
                *final(owner),
                r,
            ),
    ;

    /// Reads a specified number of bytes at a specified offset into a given buffer.
    ///
    /// # No short reads
    ///
    /// Similar to [`read`].
    ///
    /// [`read`]: VmIo::read
    fn read_byte<const N: usize>(
        &self,
        offset: usize,
        bytes: ArrayPtr<u8, N>,
        Tracked(bytes_owner): Tracked<&mut PointsToArray<u8, N>>,
        Tracked(owner): Tracked<&mut P>,
    ) -> (r: Result<()>)
        // requires
        //     Self::obeys_vmio_write_requires() ==> Self::vmio_write_requires(
        //         *self,
        //         *old(owner),
        //         offset,
        //     ),
        // ensures
        //     Self::obeys_vmio_write_ensures() ==> Self::vmio_write_ensures(
        //         *self,
        //         *old(owner),
        //         offset,
        //         *owner,
        //         r,
        //     ),
    ;

    // fn write_val<T: Pod>(&self, offset: usize, val: T, Tracked(owner): Tracked<&mut P>) -> (r:
    //     Result<()>)
    //     requires
    //         Self::obeys_vmio_write_requires() ==> Self::vmio_write_requires(
    //             *self,
    //             *old(owner),
    //             offset,
    //         ),
    //     ensures
    //         Self::obeys_vmio_write_ensures() ==> Self::vmio_write_ensures(
    //             *self,
    //             *old(owner),
    //             offset,
    //             *owner,
    //             r,
    //         ),
    // ;

    // fn write_slice<T: Pod, const N: usize>(
    //     &self,
    //     offset: usize,
    //     slice: ArrayPtr<T, N>,
    //     Tracked(slice_owner): Tracked<&mut PointsToArray<T, N>>,
    //     Tracked(owner): Tracked<&mut P>,
    // ) -> (r: Result<()>)
    //     requires
    //         Self::obeys_vmio_write_requires() ==> Self::vmio_write_requires(
    //             *self,
    //             *old(owner),
    //             offset,
    //         ),
    //     ensures
    //         Self::obeys_vmio_write_ensures() ==> Self::vmio_write_ensures(
    //             *self,
    //             *old(owner),
    //             offset,
    //             *final(owner),
    //             r,
    //         ),
    // ;
}
1170
/// A trait that enables reading/writing data from/to a VM object using one non-tearing memory
/// load/store.
///
/// See also [`VmIo`], which enables reading/writing data from/to a VM object without the guarantee
/// of using one non-tearing memory load/store.
pub trait VmIoOnce: Sized {
    /// Whether the precondition of [`Self::read_once`] is enforced.
    spec fn obeys_vmio_once_read_requires() -> bool;

    /// Whether the precondition of [`Self::write_once`] is enforced.
    spec fn obeys_vmio_once_write_requires() -> bool;

    /// Whether the postcondition of [`Self::read_once`] is enforced.
    spec fn obeys_vmio_once_read_ensures() -> bool;

    /// Whether the postcondition of [`Self::write_once`] is enforced.
    spec fn obeys_vmio_once_write_ensures() -> bool;

    /// Reads a value of the `PodOnce` type at the specified offset using one non-tearing memory
    /// load.
    ///
    /// Except that the offset is specified explicitly, the semantics of this method is the same as
    /// [`VmReader::read_once`].
    fn read_once<T: PodOnce>(&self, offset: usize) -> Result<T>;

    /// Writes a value of the `PodOnce` type at the specified offset using one non-tearing memory
    /// store.
    ///
    /// Except that the offset is specified explicitly, the semantics of this method is the same as
    /// [`VmWriter::write_once`].
    fn write_once<T: PodOnce>(&self, offset: usize, new_val: &T) -> Result<()>;
}
1199
#[verus_verify]
impl<Fallibility> VmReader<'_, Fallibility> {
    /// Spec-level view of [`Self::remain`]: the number of bytes between the
    /// read cursor and the end of the readable range.
    pub open spec fn remain_spec(&self) -> usize {
        (self.end.vaddr - self.cursor.vaddr) as usize
    }

    /// Returns the number of remaining bytes that can be read.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - `self` must satisfy its invariant.
    /// ## Postconditions
    /// - The returned value equals [`Self::remain_spec`].
    #[inline]
    #[verus_spec(r =>
        requires
            self.inv(),
        ensures
            r == self.remain_spec(),
    )]
    #[verifier::when_used_as_spec(remain_spec)]
    pub fn remain(&self) -> usize {
        // The invariant guarantees `cursor.vaddr <= end.vaddr`, so this
        // subtraction cannot underflow.
        self.end.vaddr - self.cursor.vaddr
    }

    /// Advances the cursor by `len` bytes.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - `self` must satisfy its invariant.
    /// - `len` must not exceed the remaining readable bytes.
    /// ## Postconditions
    /// - `self` still satisfies its invariant.
    /// - The cursor advances by `len`.
    /// - The remaining readable bytes decrease by `len`.
    /// - The reader identity and end cursor are preserved.
    #[inline]
    #[verus_spec(
        requires
            old(self).inv(),
            len <= old(self).remain_spec(),
        ensures
            final(self).inv(),
            final(self).cursor.vaddr == old(self).cursor.vaddr + len,
            final(self).remain_spec() == old(self).remain_spec() - len,
            final(self).id == old(self).id,
            final(self).end == old(self).end,
    )]
    pub fn advance(&mut self, len: usize) {
        self.cursor.vaddr = self.cursor.vaddr + len;
    }

    /// Reads data from `self` and writes it into the provided `writer`.
    ///
    /// This function acts as the source side of a transfer. It copies data from
    /// the current instance (`self`) into the destination `writer`, up to the limit
    /// of available data in `self` or available space in `writer` (whichever is smaller).
    ///
    /// # Logic
    ///
    /// 1. Calculates the copy length: `min(self.remaining_data, writer.available_space)`.
    /// 2. Copies bytes from `self`'s internal buffer to `writer`'s buffer.
    /// 3. Advances the cursors of both `self` and `writer`.
    ///
    /// # Arguments
    ///
    /// * `writer` - The destination `VmWriter` where the data will be copied to.
    ///
    /// # Returns
    ///
    /// The number of bytes actually transferred.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The reader, writer, and both associated owners must satisfy their invariants.
    /// - The owners must match the given reader and writer.
    /// - The writer owner must carry a write memory view.
    /// - The source and destination ranges must not overlap.
    /// - The reader owner must provide initialized readable memory for the readable range.
    /// ## Postconditions
    /// - The reader, writer, and both owners still satisfy their invariants.
    /// - The owners still match the updated reader and writer.
    /// - The returned byte count equals the minimum of readable bytes and writable bytes.
    /// - Both cursors advance by exactly the returned byte count.
    #[verus_spec(r =>
        with
            Tracked(owner_r): Tracked<&mut VmIoOwner<'_>>,
            Tracked(owner_w): Tracked<&mut VmIoOwner<'_>>,
        requires
            old(self).inv(),
            old(writer).inv(),
            old(self).cursor.vaddr > 0,
            old(writer).cursor.vaddr > 0,
            old(owner_r).inv(),
            old(owner_w).inv(),
            old(self).wf(*old(owner_r)),
            old(writer).wf(*old(owner_w)),
            old(owner_w).mem_view matches Some(VmIoMemView::WriteView(_)),
            // Non-overlapping requirements.
            old(writer).cursor.range@.start >= old(self).cursor.range@.end
                || old(self).cursor.range@.start >= old(writer).cursor.range@.end,
            old(owner_r).read_view_initialized(),
        ensures
            final(self).inv(),
            final(writer).inv(),
            final(owner_r).inv(),
            final(owner_w).inv(),
            final(self).wf(*final(owner_r)),
            final(writer).wf(*final(owner_w)),
            r == vstd::math::min(old(self).remain_spec() as int, old(writer).avail_spec() as int),
            final(self).remain_spec() == old(self).remain_spec() - r as usize,
            final(self).cursor.vaddr == old(self).cursor.vaddr + r as usize,
            final(writer).avail_spec() == old(writer).avail_spec() - r as usize,
            final(writer).cursor.vaddr == old(writer).cursor.vaddr + r as usize,
    )]
    pub fn read<WFallibility>(&mut self, writer: &mut VmWriter<'_, WFallibility>) -> usize {
        // Transfer length is the smaller of the reader's remaining bytes and
        // the writer's available space.
        let mut copy_len = if self.remain() < writer.avail() {
            self.remain()
        } else {
            writer.avail()
        };

        if copy_len == 0 {
            return 0;
        }
        // Temporarily take both memory views out of the owners; the `_` arms
        // are unreachable because the preconditions pin the view variants.
        let tracked mv_r = match owner_r.mem_view.tracked_take() {
            VmIoMemView::ReadView(mv) => mv,
            _ => { proof_from_false() },
        };
        let tracked mut mv_w = match owner_w.mem_view.tracked_take() {
            VmIoMemView::WriteView(mv) => mv,
            _ => { proof_from_false() },
        };
        // Snapshot of the write view before the copy, used below to show the
        // address mappings are unchanged.
        let ghost mv_w_pre = mv_w;

        // The `memcpy` below becomes a *safe* API here: the tracked permissions
        // for both the reader and the writer guarantee that the memory accesses
        // are valid.
        //
        // This is equivalent to: memcpy(writer.cursor.vaddr, self.cursor.vaddr, copy_len);
        proof {
            // Every address in the copied source range translates to an
            // initialized byte in the read view.
            assert forall|i: usize|
                #![trigger mv_r.addr_transl(i)]
                self.cursor.vaddr <= i < self.cursor.vaddr + copy_len implies {
                    &&& mv_r.addr_transl(i) is Some
                    &&& mv_r.memory.contains_key(mv_r.addr_transl(i).unwrap().0)
                    &&& mv_r.memory[mv_r.addr_transl(i).unwrap().0].contents[mv_r.addr_transl(
                        i,
                    ).unwrap().1 as int] is Init
                } by {
                assert(owner_r.range@.start == self.cursor.vaddr);
                assert(owner_r.range@.end == self.end.vaddr);
            }
        }
        VirtPtr::copy_nonoverlapping(
            &self.cursor,
            &writer.cursor,
            Tracked(mv_r),
            Tracked(&mut mv_w),
            copy_len,
        );

        self.advance(copy_len);
        writer.advance(copy_len);

        proof {
            // Restore the (possibly updated) views into the owners and advance
            // the owners in lockstep with the cursors.
            let ghost mv_w0 = mv_w;
            owner_w.mem_view = Some(VmIoMemView::WriteView(mv_w));
            owner_r.mem_view = Some(VmIoMemView::ReadView(mv_r));

            assert forall|va|
                owner_w.range@.start <= va < owner_w.range@.end implies mv_w.addr_transl(
                va,
            ) is Some by {
                assert(mv_w.mappings == mv_w_pre.mappings);
                assert(mv_w.addr_transl(va) == mv_w_pre.addr_transl(va));
            }

            owner_w.advance(copy_len);
            owner_r.advance(copy_len);
        }

        copy_len
    }

    /// Reads a value of `Pod` type.
    ///
    /// If the length of the `Pod` type exceeds `self.remain()`,
    /// this method will return `Err`.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The reader and its owner must satisfy their invariants.
    /// - The owner must match this reader and carry a read memory view.
    /// - The readable range must translate to initialized bytes in the read view.
    /// ## Postconditions
    /// - The reader and owner still satisfy their invariants.
    /// - On success, the cursor advances by `size_of::<T>()`.
    /// - On error, the reader state is unchanged.
    #[verus_spec(r =>
        with
            Tracked(owner): Tracked<&mut VmIoOwner<'_>>,
        requires
            old(self).inv(),
            old(owner).inv(),
            old(self).wf(*old(owner)),
            old(owner).mem_view matches Some(VmIoMemView::ReadView(_)),
            old(owner).mem_view matches Some(VmIoMemView::ReadView(mem_src)) ==> {
                forall|i: usize|
                    #![trigger mem_src.addr_transl(i)]
                    old(self).cursor.vaddr <= i < old(self).cursor.vaddr + old(self).remain_spec() ==> {
                        &&& mem_src.addr_transl(i) is Some
                        &&& mem_src.memory.contains_key(mem_src.addr_transl(i).unwrap().0)
                        &&& mem_src.memory[mem_src.addr_transl(i).unwrap().0].contents[mem_src.addr_transl(i).unwrap().1 as int] is Init
                    }
            },
        ensures
            final(self).inv(),
            final(owner).inv(),
            final(self).wf(*final(owner)),
            match r {
                Ok(_) => {
                    &&& final(self).remain_spec() == old(self).remain_spec() - core::mem::size_of::<T>()
                    &&& final(self).cursor.vaddr == old(self).cursor.vaddr + core::mem::size_of::<T>()
                },
                Err(_) => {
                    *old(self) == *final(self)
                },
            }
    )]
    pub fn read_val<T: Pod>(&mut self) -> Result<T> {
        if self.remain() < core::mem::size_of::<T>() {
            return Err(Error::InvalidArgs);
        }
        let len = core::mem::size_of::<T>();
        let tracked mem_src = owner.tracked_read_view_unwrap();

        proof {
            // Narrow the precondition (which covers the whole remaining range)
            // down to the `size_of::<T>()` bytes we are about to read.
            assert forall|i: usize|
                #![trigger mem_src.addr_transl(i)]
                self.cursor.vaddr <= i < self.cursor.vaddr + core::mem::size_of::<T>() implies {
                    &&& mem_src.addr_transl(i) is Some
                    &&& mem_src.memory.contains_key(mem_src.addr_transl(i).unwrap().0)
                    &&& mem_src.memory[mem_src.addr_transl(i).unwrap().0].contents[mem_src.addr_transl(
                        i,
                    ).unwrap().1 as int] is Init
                } by {}
        }
        proof_with!(Tracked(mem_src));
        let v = read_pod_from_view(self.cursor);

        self.advance(len);

        proof {
            // Keep the owner's tracked range in sync with the cursor.
            owner.advance(len);
        }

        Ok(v)
    }

    /// Reads a value of the `PodOnce` type using one non-tearing memory load.
    ///
    /// If the length of the `PodOnce` type exceeds `self.remain()`, this method will return `Err`.
    ///
    /// This method will not compile if the `Pod` type is too large for the current architecture
    /// and the operation must be torn into multiple memory loads.
    ///
    /// # Panics
    ///
    /// This method will panic if the current position of the reader does not meet the alignment
    /// requirements of type `T`.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The reader and its owner must satisfy their invariants.
    /// - The owner must match this reader and carry a read memory view.
    /// - The readable range must translate to initialized bytes in the read view.
    /// - The current cursor must satisfy the alignment requirements of `T`.
    /// ## Postconditions
    /// - The reader and owner still satisfy their invariants.
    /// - On success, the cursor advances by `size_of::<T>()`.
    /// - On error, the reader state is unchanged.
    #[verus_spec(r =>
        with
            Tracked(owner): Tracked<&mut VmIoOwner<'_>>,
        requires
            old(self).inv(),
            old(owner).inv(),
            old(self).wf(*old(owner)),
            old(owner).mem_view matches Some(VmIoMemView::ReadView(_)),
            old(self).cursor.vaddr % core::mem::align_of::<T>() == 0,
            old(owner).mem_view matches Some(VmIoMemView::ReadView(mem_src)) ==> {
                forall|i: usize|
                    #![trigger mem_src.addr_transl(i)]
                    old(self).cursor.vaddr <= i < old(self).cursor.vaddr + core::mem::size_of::<T>() ==> {
                        &&& mem_src.addr_transl(i) is Some
                        &&& mem_src.memory.contains_key(mem_src.addr_transl(i).unwrap().0)
                        &&& mem_src.memory[mem_src.addr_transl(i).unwrap().0].contents[mem_src.addr_transl(i).unwrap().1 as int] is Init
                    }
            },
        ensures
            final(self).inv(),
            final(owner).inv(),
            final(self).wf(*final(owner)),
            match r {
                Ok(_) => {
                    &&& final(self).remain_spec() == old(self).remain_spec() - core::mem::size_of::<T>()
                    &&& final(self).cursor.vaddr == old(self).cursor.vaddr + core::mem::size_of::<T>()
                },
                Err(_) => {
                    *old(self) == *final(self)
                },
            }
    )]
    pub fn read_val_once<T: PodOnce>(&mut self) -> Result<T> {
        if core::mem::size_of::<T>() > self.remain() {
            return Err(Error::InvalidArgs);
        }
        // SAFETY: We have checked that the number of bytes remaining is at least the size of `T`
        // and that the cursor is properly aligned with respect to the type `T`. All other safety
        // requirements are the same as for `Self::read`.

        let tracked mem_src = owner.tracked_read_view_unwrap();
        proof {
            // As in `read_val`: the bytes we are about to load are initialized.
            assert forall|i: usize|
                #![trigger mem_src.addr_transl(i)]
                self.cursor.vaddr <= i < self.cursor.vaddr + core::mem::size_of::<T>() implies {
                    &&& mem_src.addr_transl(i) is Some
                    &&& mem_src.memory.contains_key(mem_src.addr_transl(i).unwrap().0)
                    &&& mem_src.memory[mem_src.addr_transl(i).unwrap().0].contents[mem_src.addr_transl(
                        i,
                    ).unwrap().1 as int] is Init
                } by {}
        }
        proof_with!(Tracked(mem_src));
        let v = read_once_from_view::<T>(self.cursor);
        self.advance(core::mem::size_of::<T>());

        proof {
            owner.advance(core::mem::size_of::<T>());
        }

        Ok(v)
    }
}
1544
#[verus_verify]
impl<'a, Fallibility> VmWriter<'a, Fallibility> {
    /// Spec-level view of [`Self::avail`]: the number of bytes between the
    /// write cursor and the end of the writable range.
    pub open spec fn avail_spec(&self) -> usize {
        (self.end.vaddr - self.cursor.vaddr) as usize
    }

    /// Constructs a [`VmWriter`] from a pointer, which represents a memory range in USER space.
    ///
    /// ⚠️ WARNING: Currently not implemented yet.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - `ptr` must satisfy [`VirtPtr::inv`].
    /// ## Postconditions
    /// - The returned [`VmWriter`] satisfies its invariant.
    /// - The returned writer is associated with a [`VmIoOwner`] that satisfies both [`VmIoOwner::inv`]
    ///   and [`VmWriter::wf`].
    /// - The owner has the same range as `ptr`, has no memory view yet, and is marked as user-space
    ///   and infallible.
    #[verus_spec(r =>
        with
            Ghost(id): Ghost<nat>,
            -> owner: Tracked<VmIoOwner<'a>>,
        ensures
            r.inv_wf(),
            owner@.id == id,
            owner@.range == ptr.range@,
            owner@.mem_view is None,
            !owner@.is_kernel,
            !owner@.is_fallible,
            r.cursor == ptr,
            ptr.vaddr + len <= usize::MAX ==> r.end.vaddr == ptr.vaddr + len,
            r.end.range@ == ptr.range@,
            ptr.inv() && ptr.range@.start == ptr.vaddr
                && len == ptr.range@.end - ptr.range@.start ==> {
                &&& r.inv()
                &&& owner@.inv()
                &&& r.wf(owner@)
            },
    )]
    pub unsafe fn from_user_space(ptr: VirtPtr, len: usize) -> Self {
        let ghost range = ptr.range@;
        // The ownership token records the user-space (non-kernel) provenance;
        // its memory view is populated later, once a mapping is available.
        let tracked owner = VmIoOwner {
            id: Ghost(id),
            range: Ghost(range),
            is_fallible: false,
            phantom: PhantomData,
            is_kernel: false,
            mem_view: None,
        };
        // Guard against address-space overflow: if `ptr.vaddr + len` would
        // overflow, fall back to 0 (the ensures clause only constrains the
        // non-overflowing case).
        let end_vaddr = if ptr.vaddr <= usize::MAX - len {
            ptr.vaddr + len
        } else {
            0
        };
        let end = VirtPtr { vaddr: end_vaddr, range: Ghost(range) };
        proof_with!(|= Tracked(owner));
        Self { id: Ghost(id), cursor: ptr, end, phantom: PhantomData }
    }

    /// Returns the number of available bytes that can be written.
    ///
    /// This has the same implementation as [`VmReader::remain`] but semantically
    /// they are different.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - `self` must satisfy its invariant.
    /// ## Postconditions
    /// - The returned value equals [`Self::avail_spec`].
    #[inline]
    #[verus_spec(r =>
        requires
            self.inv(),
        ensures
            r == self.avail_spec(),
    )]
    #[verifier::when_used_as_spec(avail_spec)]
    pub fn avail(&self) -> usize {
        // The invariant guarantees `cursor.vaddr <= end.vaddr`.
        self.end.vaddr - self.cursor.vaddr
    }

    /// Advances the cursor by `len` bytes.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - `self` must satisfy its invariant.
    /// - `len` must not exceed the remaining writable bytes.
    /// ## Postconditions
    /// - `self` still satisfies its invariant.
    /// - The cursor advances by `len`.
    /// - The remaining writable bytes decrease by `len`.
    /// - The writer identity and end cursor are preserved.
    #[inline]
    #[verus_spec(
        requires
            old(self).inv(),
            len <= old(self).avail_spec(),
        ensures
            final(self).inv(),
            final(self).avail_spec() == old(self).avail_spec() - len,
            final(self).cursor.vaddr == old(self).cursor.vaddr + len,
            final(self).cursor.range@ == old(self).cursor.range@,
            final(self).id == old(self).id,
            final(self).end == old(self).end,
            final(self).cursor.range@ == old(self).cursor.range@,
    )]
    pub fn advance(&mut self, len: usize) {
        self.cursor.vaddr = self.cursor.vaddr + len;
    }

    /// Writes data into `self` by reading from the provided `reader`.
    ///
    /// This function treats `self` as the destination buffer. It pulls data *from*
    /// the source `reader` and writes it into the current instance.
    ///
    /// # Arguments
    ///
    /// * `reader` - The source `VmReader` to read data from.
    ///
    /// # Returns
    ///
    /// Returns the number of bytes written to `self` (which is equal to the number of bytes read from `reader`).
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The writer, reader, and both associated owners must satisfy their invariants.
    /// - The owners must match the given writer and reader.
    /// - The writer owner must carry a write memory view.
    /// - The source and destination ranges must not overlap.
    /// - The reader owner must provide initialized readable memory for the readable range.
    /// ## Postconditions
    /// - The writer, reader, and both owners still satisfy their invariants.
    /// - The owners still match the updated writer and reader.
    /// - The returned byte count equals the minimum of writable bytes and readable bytes.
    /// - Both cursors advance by exactly the returned byte count.
    #[inline]
    #[verus_spec(r =>
        with
            Tracked(owner_w): Tracked<&mut VmIoOwner<'_>>,
            Tracked(owner_r): Tracked<&mut VmIoOwner<'_>>,
        requires
            old(self).inv(),
            old(reader).inv(),
            old(self).cursor.vaddr > 0,
            old(reader).cursor.vaddr > 0,
            old(owner_w).inv(),
            old(owner_r).inv(),
            old(self).wf(*old(owner_w)),
            old(reader).wf(*old(owner_r)),
            old(owner_w).mem_view matches Some(VmIoMemView::WriteView(_)),
            // Non-overlapping requirements.
            old(self).cursor.range@.start >= old(reader).cursor.range@.end
                || old(reader).cursor.range@.start >= old(self).cursor.range@.end,
            old(owner_r).read_view_initialized(),
        ensures
            final(self).inv(),
            final(reader).inv(),
            final(owner_w).inv(),
            final(owner_r).inv(),
            final(self).wf(*final(owner_w)),
            final(reader).wf(*final(owner_r)),
            r == vstd::math::min(old(self).avail_spec() as int, old(reader).remain_spec() as int),
            final(self).avail_spec() == old(self).avail_spec() - r as usize,
            final(self).cursor.vaddr == old(self).cursor.vaddr + r as usize,
            final(reader).remain_spec() == old(reader).remain_spec() - r as usize,
            final(reader).cursor.vaddr == old(reader).cursor.vaddr + r as usize,
    )]
    pub fn write<RFallibility>(&mut self, reader: &mut VmReader<'_, RFallibility>) -> usize {
        // Delegate to `VmReader::read`, which performs the actual copy; the
        // reader's owner is passed first, matching that method's signature.
        proof_with!(Tracked(owner_r), Tracked(owner_w));
        reader.read(self)
    }

    /// Writes a value of `Pod` type.
    ///
    /// If the length of the `Pod` type exceeds `self.avail()`,
    /// this method will return `Err`.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The writer and its owner must satisfy their invariants.
    /// - The owner must match this writer and carry a memory view.
    /// ## Postconditions
    /// - The writer and owner still satisfy their invariants.
    /// - The owner parameters tracked by [`VmIoOwner::params_eq`] are preserved.
    ///   (NOTE(review): this claim is not currently reflected in the `ensures`
    ///   clause below — confirm.)
    /// - On success, the cursor advances by `size_of::<T>()`.
    /// - On error, the writer state is unchanged.
    ///
    /// NOTE(review): `val` is taken as `&mut T` only because
    /// [`VmReader::from_pod`] requires a mutable reference; the value itself is
    /// not modified here. The tracked `memview_r` parameter is unused in the
    /// body — confirm whether it is still needed.
    #[verifier::external_body]
    #[verus_spec(r =>
        with
            Ghost(id): Ghost<nat>,
            Tracked(owner_w): Tracked<&mut VmIoOwner<'_>>,
            Tracked(memview_r): Tracked<&MemView>,
        requires
            old(self).inv(),
            old(owner_w).inv(),
            old(self).wf(*old(owner_w)),
            old(owner_w).mem_view is Some,
        ensures
            final(self).inv(),
            final(owner_w).inv(),
            final(self).wf(*final(owner_w)),
            match r {
                Ok(_) => {
                    &&& final(self).avail_spec() == old(self).avail_spec() - core::mem::size_of::<T>()
                    &&& final(self).cursor.vaddr == old(self).cursor.vaddr + core::mem::size_of::<T>()
                },
                Err(_) => {
                    *old(self) == *final(self)
                },
            }
    )]
    pub fn write_val<T: Pod>(&mut self, val: &mut T) -> Result<()> {
        if self.avail() < core::mem::size_of::<T>() {
            return Err(Error::InvalidArgs);
        }
        // Build a temporary reader over the bytes of `val` ...
        proof_with!(Ghost(id) => Tracked(owner_r));
        let mut reader = VmReader::from_pod(val)?;

        let tracked mut owner_r = owner_r.tracked_unwrap();

        // ... and drain it into `self`. The `avail()` pre-check above ensures
        // the full value fits, so the returned count can be ignored here.
        proof_with!(Tracked(owner_w), Tracked(&mut owner_r));
        self.write(&mut reader);

        Ok(())
    }

    /// Performs the raw non-tearing store backing [`Self::write_once`].
    ///
    /// Trusted (`external_body`): the caller must have checked that at least
    /// `size_of::<T>()` bytes are available at the cursor.
    #[verifier::external_body]
    #[verus_spec(
        requires
            self.inv(),
            core::mem::size_of::<T>() <= self.avail_spec(),
    )]
    fn write_once_inner<T: PodOnce>(&self, new_val: &mut T) {
        let cursor = self.cursor.vaddr as *mut T;
        // SAFETY: per the `requires` clause, the cursor points to at least
        // `size_of::<T>()` writable bytes within this writer's range.
        // NOTE(review): alignment of the cursor for `T` is assumed but not
        // checked here — confirm the caller's contract.
        unsafe { cursor.write_volatile(*new_val) };
    }

    /// Writes a value of the `PodOnce` type using one non-tearing memory store.
    ///
    /// If the length of the `PodOnce` type exceeds `self.avail()`, this method will return `Err`.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The writer and its owner must satisfy their invariants.
    /// - The owner must match this writer and carry a memory view.
    /// ## Postconditions
    /// - The writer and owner still satisfy their invariants.
    /// - On success, the cursor advances by `size_of::<T>()`.
    /// - On error, the writer state is unchanged.
    #[verus_spec(r =>
        with
            Tracked(owner_w): Tracked<&mut VmIoOwner<'_>>,
        requires
            old(self).inv(),
            old(owner_w).mem_view is Some,
            old(owner_w).inv(),
            old(self).wf(*old(owner_w)),
        ensures
            final(self).inv(),
            final(owner_w).inv(),
            final(self).wf(*final(owner_w)),
            match r {
                Ok(_) => {
                    &&& final(self).avail_spec() == old(self).avail_spec() - core::mem::size_of::<T>()
                    &&& final(self).cursor.vaddr == old(self).cursor.vaddr + core::mem::size_of::<T>()
                },
                Err(_) => {
                    *old(self) == *final(self)
                },
            }
    )]
    pub fn write_once<T: PodOnce>(&mut self, new_val: &mut T) -> Result<()> {
        if core::mem::size_of::<T>() > self.avail() {
            return Err(Error::InvalidArgs);
        }
        // SAFETY: We have checked that the number of bytes available is at least the size of `T`
        // and that the cursor is properly aligned with respect to the type `T`. All other safety
        // requirements are the same as for `Self::write`.
        // NOTE(review): unlike `read_val_once`, no alignment precondition or
        // runtime check exists here, so the alignment part of the comment above
        // is currently unenforced — confirm.

        self.write_once_inner::<T>(new_val);
        self.advance(core::mem::size_of::<T>());

        proof {
            // Keep the owner's tracked range in sync with the cursor.
            owner_w.advance(core::mem::size_of::<T>());
        }

        Ok(())
    }
}
1835
1836} // verus!
1837
1838impl<'a> From<&'a [u8]> for VmReader<'a, Infallible> {
1839 fn from(slice: &'a [u8]) -> Self {
1840 Self::from_slice(slice)
1841 }
1842}
1843
1844impl<'a> From<&'a mut [u8]> for VmWriter<'a, Infallible> {
1845 fn from(slice: &'a mut [u8]) -> Self {
1846 Self::from_slice_mut(slice)
1847 }
1848}
1849
/// Fallible memory read from a `VmWriter`.
pub trait FallibleVmRead<F> {
    /// Copies bytes from `self` into `writer`, bounded by whichever side has
    /// less room.
    ///
    /// Returns `Ok(copied)` on full success, or `Err((err, copied))` where
    /// `copied` is the number of bytes transferred before the failure.
    fn read_fallible(
        &mut self,
        writer: &mut VmWriter<'_, F>,
    ) -> core::result::Result<usize, (Error, usize)>;
}
1857
/// Fallible memory write from a `VmReader`.
pub trait FallibleVmWrite<F> {
    /// Copies bytes from `reader` into `self`, bounded by whichever side has
    /// less room.
    ///
    /// Returns `Ok(copied)` on full success, or `Err((err, copied))` where
    /// `copied` is the number of bytes transferred before the failure.
    fn write_fallible(
        &mut self,
        reader: &mut VmReader<'_, F>,
    ) -> core::result::Result<usize, (Error, usize)>;
}
1865
macro_rules! impl_read_fallible {
    ($reader_fallibility:ty, $writer_fallibility:ty) => {
        ::vstd::prelude::verus! {
        impl<'a> FallibleVmRead<$writer_fallibility> for VmReader<'a, $reader_fallibility> {
            /// Copies as many bytes as both sides allow, stopping early if the
            /// underlying copy faults and reporting how much was transferred.
            #[verifier::external_body]
            fn read_fallible(
                &mut self,
                writer: &mut VmWriter<'_, $writer_fallibility>,
            ) -> core::result::Result<usize, (Error, usize)> {
                // The transfer length is bounded by whichever side has less room.
                let src_left = self.end.vaddr - self.cursor.vaddr;
                let dst_room = writer.end.vaddr - writer.cursor.vaddr;
                let len = if src_left < dst_room { src_left } else { dst_room };
                if len == 0 {
                    return Ok(0);
                }

                let src = self.cursor.vaddr as *const u8;
                let dst = writer.cursor.vaddr as *mut u8;
                // `memcpy_fallible` reports how many bytes it managed to copy
                // before (possibly) faulting; advance both cursors by exactly
                // that amount so partial progress is reflected on both sides.
                let done = unsafe { memcpy_fallible(dst, src, len) };
                self.cursor.vaddr = self.cursor.vaddr + done;
                writer.cursor.vaddr = writer.cursor.vaddr + done;

                if done == len {
                    Ok(done)
                } else {
                    Err((Error::PageFault, done))
                }
            }
        }
        } // verus!
    };
}
1898
macro_rules! impl_write_fallible {
    ($writer_fallibility:ty, $reader_fallibility:ty) => {
        ::vstd::prelude::verus! {
        impl<'a> FallibleVmWrite<$reader_fallibility> for VmWriter<'a, $writer_fallibility> {
            /// Delegates to `read_fallible` with the roles swapped: a write
            /// from a reader is the same byte transfer viewed from the other end.
            #[verifier::external_body]
            fn write_fallible(
                &mut self,
                src: &mut VmReader<'_, $reader_fallibility>,
            ) -> core::result::Result<usize, (Error, usize)> {
                src.read_fallible(self)
            }
        }
        } // verus!
    };
}
1914
// Instantiate the fallible read/write impls for every reader/writer pairing
// that involves at least one `Fallible` side. The all-`Infallible` pairing is
// intentionally not generated here.
impl_read_fallible!(Fallible, Infallible);
impl_read_fallible!(Fallible, Fallible);
impl_read_fallible!(Infallible, Fallible);
impl_write_fallible!(Fallible, Infallible);
impl_write_fallible!(Fallible, Fallible);
impl_write_fallible!(Infallible, Fallible);