ostd/mm/io.rs
1// SPDX-License-Identifier: MPL-2.0
2//! Abstractions for reading and writing virtual memory (VM) objects.
3//!
4//! # Safety
5//!
6//! The core virtual memory (VM) access APIs provided by this module are [`VmReader`] and
7//! [`VmWriter`], which allow for writing to or reading from a region of memory _safely_.
8//! `VmReader` and `VmWriter` objects can be constructed from memory regions of either typed memory
//! (e.g., `&[u8]`) or untyped memory (e.g., [`UFrame`]). Behind the scenes, `VmReader` and `VmWriter`
10//! must be constructed via their [`from_user_space`] and [`from_kernel_space`] methods, whose
11//! safety depends on whether the given memory regions are _valid_ or not.
12//!
13//! [`UFrame`]: crate::mm::UFrame
14//! [`from_user_space`]: `VmReader::from_user_space`
15//! [`from_kernel_space`]: `VmReader::from_kernel_space`
16//!
17//! Here is a list of conditions for memory regions to be considered valid:
18//!
19//! - The memory region as a whole must be either typed or untyped memory, not both typed and
20//! untyped.
21//!
22//! - If the memory region is typed, we require that:
23//! - the [validity requirements] from the official Rust documentation must be met, and
24//! - the type of the memory region (which must exist since the memory is typed) must be
25//! plain-old-data, so that the writer can fill it with arbitrary data safely.
26//!
27//! [validity requirements]: core::ptr#safety
28//!
29//! - If the memory region is untyped, we require that:
30//! - the underlying pages must remain alive while the validity requirements are in effect, and
31//! - the kernel must access the memory region using only the APIs provided in this module, but
32//! external accesses from hardware devices or user programs do not count.
33//!
34//! We have the last requirement for untyped memory to be valid because the safety interaction with
35//! other ways to access the memory region (e.g., atomic/volatile memory loads/stores) is not
//! currently specified. This may be relaxed in the future, if appropriate and necessary.
37//!
38//! Note that data races on untyped memory are explicitly allowed (since pages can be mapped to
39//! user space, making it impossible to avoid data races). However, they may produce erroneous
40//! results, such as unexpected bytes being copied, but do not cause soundness problems.
41use crate::specs::arch::PAGE_SIZE;
42use core::marker::PhantomData;
43use core::ops::Range;
44use vstd::pervasive::arbitrary;
45use vstd::prelude::*;
46use vstd::simple_pptr::*;
47use vstd::std_specs::convert::TryFromSpecImpl;
48use vstd_extra::array_ptr::ArrayPtr;
49use vstd_extra::array_ptr::PointsToArray;
50use vstd_extra::ownership::Inv;
51
52use crate::error::*;
53use crate::mm::kspace::{KERNEL_BASE_VADDR, KERNEL_END_VADDR};
54use crate::mm::pod::{Pod, PodOnce};
55use crate::specs::arch::MAX_USERSPACE_VADDR;
56use crate::specs::mm::page_table::Mapping;
57use crate::specs::mm::virt_mem_newer::{MemView, VirtPtr};
58
59verus! {
60
/// Performs a fallible transfer from `reader` to `writer`.
///
/// # Verified Properties
/// ## Preconditions
/// - `reader`, `writer`, and their associated owners must satisfy their invariants.
/// - Each owner must match the corresponding reader or writer.
/// - Both owners must be marked fallible.
/// ## Postconditions
/// - The reader, writer, and both owners still satisfy their invariants.
/// - The owner parameters tracked by [`VmIoOwner::params_eq`] are preserved for both sides.
#[verus_spec(r =>
    with
        Tracked(owner_r): Tracked<&mut VmIoOwner<'_>>,
        Tracked(owner_w): Tracked<&mut VmIoOwner<'_>>,
    requires
        old(reader).inv(),
        old(writer).inv(),
        old(owner_r).inv(),
        old(owner_w).inv(),
        old(owner_r).inv_with_reader(*old(reader)),
        old(owner_w).inv_with_writer(*old(writer)),
        old(owner_r).is_fallible && old(owner_w).is_fallible, // both fallible
    ensures
        reader.inv(),
        writer.inv(),
        owner_r.inv(),
        owner_w.inv(),
        owner_r.inv_with_reader(*reader),
        owner_w.inv_with_writer(*writer),
        owner_r.params_eq(*old(owner_r)),
        owner_w.params_eq(*old(owner_w)),
)]
pub fn rw_fallible(reader: &mut VmReader<'_>, writer: &mut VmWriter<'_>) -> core::result::Result<
    usize,
    (Error, usize),
> {
    // TODO: the actual byte-transfer loop is not implemented yet. Returning
    // `Ok(0)` (zero bytes transferred, cursors untouched) trivially satisfies
    // the postconditions above.
    Ok(0) // placeholder.

}
100
/// Copies `len` bytes from `src` to `dst`.
///
/// # Safety
///
/// - `src` must be [valid] for reads of `len` bytes.
/// - `dst` must be [valid] for writes of `len` bytes.
///
/// [valid]: crate::mm::io#safety
#[inline]
#[verifier::external_body]
#[verus_spec(
    requires
        // Both the source and destination ranges must lie entirely within
        // kernel space; user-space copies must go through fallible paths.
        KERNEL_BASE_VADDR <= dst && dst + len <= KERNEL_END_VADDR &&
        KERNEL_BASE_VADDR <= src && src + len <= KERNEL_END_VADDR
)]
unsafe fn memcpy(dst: usize, src: usize, len: usize) {
    // This method is implemented by calling `volatile_copy_memory`. Note that even with the
    // "volatile" keyword, data races are still considered undefined behavior (UB) in both the Rust
    // documentation and the C/C++ standards. In general, UB makes the behavior of the entire
    // program unpredictable, usually due to compiler optimizations that assume the absence of UB.
    // However, in this particular case, considering that the Linux kernel uses the "volatile"
    // keyword to implement `READ_ONCE`/`WRITE_ONCE`, the compiler is extremely unlikely to
    // break our code unless it also breaks the Linux kernel.
    //
    // For more details and future possibilities, see
    // <https://github.com/asterinas/asterinas/pull/1001#discussion_r1667317406>.
    // SAFETY: The safety is guaranteed by the safety preconditions and the explanation above.
    core::intrinsics::volatile_copy_memory(dst as *mut u8, src as *const u8, len);
}
130
131/// Reads a `Pod` value directly from a verified memory view.
132///
133/// # Verified Properties
134/// ## Preconditions
135/// - `ptr` must satisfy [`VirtPtr::inv`].
136/// - The readable range `[ptr.vaddr, ptr.vaddr + size_of::<T>())` must fit in `ptr.range@`.
137/// - Every byte in that range must translate in `mem` and be initialized.
138/// ## Postconditions
139/// - The function returns a `T` value reconstructed from the bytes at `ptr`.
140/// ## Safety
141/// - This function is trusted because Verus cannot currently express the raw-pointer read needed
142/// to reconstruct a typed `Pod` value from verified byte-level memory permissions.
143#[verifier::external_body]
144#[verus_spec(
145 with
146 Tracked(mem): Tracked<&MemView>,
147 requires
148 ptr.inv(),
149 core::mem::size_of::<T>() <= ptr.range@.end - ptr.vaddr,
150 forall|i: usize|
151 #![trigger mem.addr_transl(i)]
152 ptr.vaddr <= i < ptr.vaddr + core::mem::size_of::<T>() ==> {
153 &&& mem.addr_transl(i) is Some
154 &&& mem.memory.contains_key(mem.addr_transl(i).unwrap().0)
155 &&& mem.memory[mem.addr_transl(i).unwrap().0].contents[mem.addr_transl(i).unwrap().1 as int] is Init
156 },
157)]
158fn read_pod_from_view<T: Pod>(ptr: VirtPtr) -> T {
159 unsafe { (ptr.vaddr as *const T).read() }
160}
161
162/// Reads a `PodOnce` value using one volatile memory load from a verified memory view.
163///
164/// # Verified Properties
165/// ## Preconditions
166/// - `ptr` must satisfy [`VirtPtr::inv`].
167/// - The readable range `[ptr.vaddr, ptr.vaddr + size_of::<T>())` must fit in `ptr.range@`.
168/// - `ptr.vaddr` must satisfy the alignment requirement of `T`.
169/// - Every byte in that range must translate in `mem` and be initialized.
170/// ## Postconditions
171/// - The function returns a `T` value read from `ptr` using one volatile load.
172/// ## Safety
173/// - This function is trusted because the underlying volatile typed read relies on raw-pointer
174/// operations that Verus does not yet model directly.
175#[verifier::external_body]
176#[verus_spec(
177 with
178 Tracked(mem): Tracked<&MemView>,
179 requires
180 ptr.inv(),
181 core::mem::size_of::<T>() <= ptr.range@.end - ptr.vaddr,
182 ptr.vaddr % core::mem::align_of::<T>() == 0,
183 forall|i: usize|
184 #![trigger mem.addr_transl(i)]
185 ptr.vaddr <= i < ptr.vaddr + core::mem::size_of::<T>() ==> {
186 &&& mem.addr_transl(i) is Some
187 &&& mem.memory.contains_key(mem.addr_transl(i).unwrap().0)
188 &&& mem.memory[mem.addr_transl(i).unwrap().0].contents[mem.addr_transl(i).unwrap().1 as int] is Init
189 },
190)]
191fn read_once_from_view<T: PodOnce>(ptr: VirtPtr) -> T {
192 let pnt = ptr.vaddr as *const T;
193 unsafe { pnt.read_volatile() }
194}
195
/// [`VmReader`] is a reader for reading data from a contiguous range of memory.
///
/// The memory range read by [`VmReader`] can be in either kernel space or user space.
/// When the operating range is in kernel space, the memory within that range
/// is guaranteed to be valid, and the corresponding memory reads are infallible.
/// When the operating range is in user space, it is ensured that the page table of
/// the process creating the [`VmReader`] is active for the duration of `'a`,
/// and the corresponding memory reads are considered fallible.
///
/// When performing reads together with a [`VmWriter`], if one of them represents typed
/// memory, it can be ensured that the reading range in this reader and the writing
/// range in the writer do not overlap.
///
/// NOTE: The overlap mentioned above is at both the virtual address level
/// and the physical address level. There is no guarantee for the operation results
/// of [`VmReader`] and [`VmWriter`] in overlapping untyped addresses, and it is
/// the user's responsibility to handle this situation.
pub struct VmReader<'a /*, Fallibility = Fallible*/ > {
    /// The unique identifier of this reader; matches the `id` of the [`VmIoOwner`]
    /// it was created together with.
    pub id: Ghost<nat>,
    /// The current read position within the range.
    pub cursor: VirtPtr,
    /// The (exclusive) end of the readable range.
    pub end: VirtPtr,
    // Ties the reader to lifetime `'a`. The commented-out parameter is a
    // placeholder for a future `Fallibility` marker type.
    pub phantom: PhantomData<&'a [u8] /*, Fallibility)*/ >,
}
219
/// The memory view used for VM I/O operations.
///
/// The readers can think of this as wrapped permission tokens for operating with a certain
/// memory view (see [`MemView`]) "owned" by the [`VmIoOwner`] that they are created from, which
/// are used for allowing callers to use [`VmReader`] and [`VmWriter`] to perform VM I/O operations.
///
/// For writers the memory permission must be exclusive, so this enum contains an owned [`MemView`]
/// for the write view; for readers the memory permission can be shared, so this enum contains a
/// reference to a [`MemView`] for the read view (which can be disabled optionally in [`VmSpaceOwner`]).
///
/// ⚠️ WARNING: We do not recommend using this enum directly.
///
/// [`VmSpaceOwner`]: crate::mm::vm_space::VmSpaceOwner
pub tracked enum VmIoMemView<'a> {
    /// An owned (exclusive) memory view for writing.
    WriteView(MemView),
    /// A shared (borrowed) memory view for reading.
    ReadView(&'a MemView),
}
239
/// The owner of a VM I/O operation, which tracks the memory range and the memory view for the
/// operation.
///
/// Basically the caller should be only interested in this struct when using [`VmReader`] and
/// [`VmWriter`] to perform VM I/O operations, since the safety of these operations depends on the
/// validity of the memory range and memory view tracked by this struct.
pub tracked struct VmIoOwner<'a> {
    /// The unique identifier of this owner.
    pub id: Ghost<nat>,
    /// The virtual address range owned by this owner.
    pub range: Ghost<Range<usize>>,
    /// Whether this owner (and hence the reader/writer it backs) is fallible.
    pub is_fallible: bool,
    // Ties the owner to lifetime `'a`. The commented-out parameter is a
    // placeholder for a future `Fallibility` marker type.
    pub phantom: PhantomData<&'a [u8] /*, Fallibility)*/ >,
    /// Whether this owner is for kernel space.
    pub is_kernel: bool,
    /// The mem view associated with this owner; `None` means no memory is
    /// accessible yet (a placeholder owner).
    pub mem_view: Option<VmIoMemView<'a>>,
}
261
impl Inv for VmIoOwner<'_> {
    /// The invariant of a [`VmIoOwner`].
    ///
    /// The owned virtual-address range must be ordered. When a memory view is present,
    /// its mappings must be finite, disjoint, and cover the whole range. If the memory
    /// view is `None`, then this owner acts as a placeholder that grants no accessible
    /// memory.
    open spec fn inv(self) -> bool {
        // We do allow ZSTs so that empty ranges are valid.
        &&& self.range@.start <= self.range@.end
        &&& match self.mem_view {
            // Case 1: Write (exclusive). The conditions are intentionally
            // identical to the read case; they are spelled out per-arm because
            // the two arms bind different view types.
            Some(VmIoMemView::WriteView(mv)) => {
                &&& mv.mappings.finite()
                &&& mv.mappings_are_disjoint()
                // Every byte of the owned range must translate in the view.
                &&& forall|va: usize|
                    self.range@.start <= va < self.range@.end ==> {
                        &&& #[trigger] mv.addr_transl(va) is Some
                    }
            },
            // Case 2: Read (shared)
            Some(VmIoMemView::ReadView(mv)) => {
                &&& mv.mappings.finite()
                &&& mv.mappings_are_disjoint()
                &&& forall|va: usize|
                    self.range@.start <= va < self.range@.end ==> {
                        &&& #[trigger] mv.addr_transl(va) is Some
                    }
            },
            // Case 3: Empty/Invalid; this means no memory is accessible,
            // and we are just creating a placeholder.
            None => true,
        }
    }
}
297
impl VmIoOwner<'_> {
    /// Checks whether this owner overlaps with another owner.
    #[verifier::inline]
    pub open spec fn overlaps(self, other: VmIoOwner<'_>) -> bool {
        !self.disjoint(other)
    }

    /// Checks whether this owner's range overlaps with the given range.
    ///
    /// NOTE(review): this uses `<=` on both bounds, so two ranges that merely
    /// touch end-to-start (half-open adjacency) also count as "overlapping".
    /// This is conservative for `disjoint` below — confirm whether strict `<`
    /// was intended.
    #[verifier::inline]
    pub open spec fn overlaps_with_range(self, range: Range<usize>) -> bool {
        &&& self.range@.start <= range.end
        &&& range.start <= self.range@.end
    }

    /// Checks whether this owner is disjoint with another owner.
    ///
    /// Two owners are disjoint when their virtual-address ranges do not overlap
    /// and (if both carry memory views) their mapping sets are disjoint.
    #[verifier::inline]
    pub open spec fn disjoint(self, other: VmIoOwner<'_>) -> bool {
        &&& !self.overlaps_with_range(other.range@)
        &&& match (self.mem_view, other.mem_view) {
            // All four view combinations impose the same condition: the two
            // mapping sets must be disjoint.
            (Some(lhs), Some(rhs)) => match (lhs, rhs) {
                (VmIoMemView::WriteView(lmv), VmIoMemView::WriteView(rmv)) => {
                    lmv.mappings.disjoint(rmv.mappings)
                },
                (VmIoMemView::WriteView(lmv), VmIoMemView::ReadView(rmv)) => {
                    lmv.mappings.disjoint(rmv.mappings)
                },
                (VmIoMemView::ReadView(lmv), VmIoMemView::WriteView(rmv)) => {
                    lmv.mappings.disjoint(rmv.mappings)
                },
                (VmIoMemView::ReadView(lmv), VmIoMemView::ReadView(rmv)) => {
                    lmv.mappings.disjoint(rmv.mappings)
                },
            },
            // If either side has no view, there is nothing to conflict on.
            _ => true,
        }
    }

    /// Checks that the range and fallibility parameters of two owners agree.
    /// Used in postconditions to state that an operation preserved them.
    #[verifier::inline]
    pub open spec fn params_eq(self, other: VmIoOwner<'_>) -> bool {
        &&& self.range@ == other.range@
        &&& self.is_fallible == other.is_fallible
    }

    /// Changes the fallibility of this owner.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The owner must satisfy its invariant.
    /// - The new fallibility must differ from the current one.
    /// ## Postconditions
    /// - The updated owner still satisfies its invariant.
    /// - The `is_fallible` field is updated to `fallible`.
    pub proof fn change_fallible(tracked &mut self, tracked fallible: bool)
        requires
            old(self).inv(),
            old(self).is_fallible != fallible,
        ensures
            self.inv(),
            self.is_fallible == fallible,
    {
        // `is_fallible` does not participate in any clause of `inv`, so a
        // plain field update preserves the invariant.
        self.is_fallible = fallible;
    }

    /// Advances the cursor of the reader/writer by the given number of bytes.
    ///
    /// Note this will return the advanced `VmIoMemView` as the previous permission
    /// is no longer needed and must be discarded then.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The owner must satisfy its invariant.
    /// - The owner must carry a memory view.
    /// - `nbytes` must not exceed the remaining length of the owned range.
    /// ## Postconditions
    /// - The updated owner still satisfies its invariant.
    /// - The start of the owned range is advanced by `nbytes`.
    /// - The end of the owned range and the other owner parameters are preserved.
    /// - The returned [`VmIoMemView`] is the portion advanced past.
    pub proof fn advance(tracked &mut self, nbytes: usize) -> (tracked res: VmIoMemView<'_>)
        requires
            old(self).inv(),
            old(self).mem_view is Some,
            nbytes <= old(self).range@.end - old(self).range@.start,
        ensures
            self.inv(),
            self.range@.start == old(self).range@.start + nbytes,
            self.range@.end == old(self).range@.end,
            self.is_fallible == old(self).is_fallible,
            self.id == old(self).id,
            self.is_kernel == old(self).is_kernel,
    {
        let start = self.range@.start;
        let old_end = self.range@.end;
        self.range = Ghost((start + nbytes) as usize..self.range@.end);
        // Take this option and leaves the `None` in its place temporarily.
        let tracked inner = self.mem_view.tracked_take();
        let tracked ret_perm = match inner {
            VmIoMemView::WriteView(mv) => {
                // Split the exclusive view: `lhs` covers the advanced-past
                // prefix and is returned; `rhs` covers the remainder.
                let tracked (lhs, rhs) = mv.split(start, nbytes);

                assert(forall|va: usize|
                    start <= va < start + nbytes ==> mv.addr_transl(va) is Some);

                // Update the mem view to the remaining part.
                self.mem_view = Some(VmIoMemView::WriteView(rhs));

                // Re-establish `inv`: every remaining address still translates
                // in `rhs`. The proof picks, for each address, a mapping of
                // `mv` that covers it and shows `split` kept that mapping.
                assert(self.inv()) by {
                    assert forall|va: usize| start + nbytes <= va < old_end implies {
                        #[trigger] rhs.addr_transl(va) is Some
                    } by {
                        assert(mv.addr_transl(va) is Some) by {
                            assert(old(self).inv());
                        }

                        // Mappings of the old/new view that cover `va`.
                        let old_mappings = mv.mappings.filter(
                            |m: Mapping| m.va_range.start <= va < m.va_range.end,
                        );
                        let new_mappings = rhs.mappings.filter(
                            |m: Mapping| m.va_range.start <= va < m.va_range.end,
                        );
                        assert(old_mappings.len() != 0);
                        // `split` keeps exactly the mappings extending past the split point.
                        assert(rhs.mappings =~= mv.mappings.filter(
                            |m: Mapping| m.va_range.end > start + nbytes,
                        ));
                        assert(new_mappings =~= mv.mappings.filter(
                            |m: Mapping|
                                m.va_range.start <= va < m.va_range.end && m.va_range.end > start
                                    + nbytes,
                        ));

                        // Any old mapping covering `va >= start + nbytes` must
                        // end past the split point, hence survives the split.
                        assert(new_mappings.len() != 0) by {
                            broadcast use vstd::set::group_set_axioms;

                            let m = old_mappings.choose();
                            // m.start <= va < m.end
                            assert(start + nbytes <= va);
                            assert(m.va_range.end > va) by {
                                if (m.va_range.end <= va) {
                                    assert(!old_mappings.contains(m));
                                }
                            }
                            assert(m.va_range.end > start + nbytes);
                            assert(old_mappings.contains(m));
                            assert(old_mappings.subset_of(mv.mappings));
                            assert(rhs.mappings.contains(m));
                            assert(new_mappings.contains(m));
                            assert(new_mappings.len() >= 1);
                        }
                    }
                }

                VmIoMemView::WriteView(lhs)
            },
            VmIoMemView::ReadView(mv) => {
                // Borrow a sub-view for the advanced-past prefix.
                let tracked sub_view = mv.borrow_at(start, nbytes);
                // Since reads are shared so we don't need to
                // modify the original view; here we just put
                // it back.
                self.mem_view = Some(VmIoMemView::ReadView(mv));
                VmIoMemView::ReadView(sub_view)
            },
        };

        ret_perm
    }

    /// Unwraps the read view.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The owner must satisfy its invariant.
    /// - The owner must carry a read memory view.
    /// ## Postconditions
    /// - The returned reference is exactly the read view stored in `self.mem_view`.
    pub proof fn tracked_read_view_unwrap(tracked &self) -> (tracked r: &MemView)
        requires
            self.inv(),
            self.mem_view matches Some(VmIoMemView::ReadView(_)),
        ensures
            VmIoMemView::ReadView(r) == self.mem_view.unwrap(),
    {
        match self.mem_view {
            Some(VmIoMemView::ReadView(r)) => r,
            // Unreachable: ruled out by the precondition.
            _ => { proof_from_false() },
        }
    }
}
484
impl Inv for VmWriter<'_> {
    /// The invariant of a [`VmWriter`]: both pointers are valid, the cursor
    /// never passes the end, and both pointers carry the same ghost range.
    open spec fn inv(self) -> bool {
        &&& self.cursor.inv()
        &&& self.end.inv()
        &&& self.cursor.vaddr
            <= self.end.vaddr
        // Ensure that they point to the same range as in VmIoOwner.
        &&& self.cursor.range@ == self.end.range@
    }
}
495
impl Inv for VmReader<'_> {
    /// The invariant of a [`VmReader`]: both pointers are valid, the cursor
    /// never passes the end, and both pointers carry the same ghost range.
    open spec fn inv(self) -> bool {
        &&& self.cursor.inv()
        &&& self.end.inv()
        &&& self.cursor.vaddr
            <= self.end.vaddr
        // Ensure that they point to the same range as in VmIoOwner.
        &&& self.cursor.range@ == self.end.range@
    }
}
506
impl VmIoOwner<'_> {
    /// The invariant relating this owner to the [`VmReader`] it backs: the owner
    /// is valid, its range spans exactly `[reader.cursor.vaddr, reader.end.vaddr)`,
    /// the identifiers match, and any read view covers the whole range.
    pub open spec fn inv_with_reader(
        self,
        reader: VmReader<'_ /* Fallibility */ >,
    ) -> bool {
        &&& self.inv()
        &&& self.range@.start == reader.cursor.vaddr
        &&& self.range@.end == reader.end.vaddr
        &&& self.id == reader.id
        &&& self.mem_view matches Some(VmIoMemView::ReadView(mv)) ==> {
            // Ensure that the mem view covers the entire range.
            forall|va: usize|
                self.range@.start <= va < self.range@.end ==> {
                    &&& #[trigger] mv.addr_transl(va) is Some
                }
        }
    }

    /// The invariant relating this owner to the [`VmWriter`] it backs; mirrors
    /// [`Self::inv_with_reader`] but requires a write view covering the range.
    pub open spec fn inv_with_writer(
        self,
        writer: VmWriter<'_ /* Fallibility */ >,
    ) -> bool {
        &&& self.inv()
        &&& self.range@.start == writer.cursor.vaddr
        &&& self.range@.end == writer.end.vaddr
        &&& self.id == writer.id
        &&& self.mem_view matches Some(VmIoMemView::WriteView(mv)) ==> {
            // Ensure that the mem view covers the entire range.
            forall|va: usize|
                self.range@.start <= va < self.range@.end ==> {
                    &&& #[trigger] mv.addr_transl(va) is Some
                }
        }
    }
}
542
/// [`VmWriter`] is a writer for writing data to a contiguous range of memory.
///
/// The memory range written by [`VmWriter`] can be in either kernel space or user space.
/// When the operating range is in kernel space, the memory within that range
/// is guaranteed to be valid, and the corresponding memory writes are infallible.
/// When the operating range is in user space, it is ensured that the page table of
/// the process creating the [`VmWriter`] is active for the duration of `'a`,
/// and the corresponding memory writes are considered fallible.
///
/// When performing writes together with a [`VmReader`], if one of them represents typed
/// memory, it can be ensured that the writing range in this writer and the reading
/// range in the reader do not overlap.
///
/// NOTE: The overlap mentioned above is at both the virtual address level
/// and the physical address level. There is no guarantee for the operation results
/// of [`VmReader`] and [`VmWriter`] in overlapping untyped addresses, and it is
/// the user's responsibility to handle this situation.
pub struct VmWriter<'a /*, Fallibility = Fallible*/ > {
    /// The unique identifier of this writer; matches the `id` of the [`VmIoOwner`]
    /// it was created together with.
    pub id: Ghost<nat>,
    /// The current write position within the range.
    pub cursor: VirtPtr,
    /// The (exclusive) end of the writable range.
    pub end: VirtPtr,
    // Ties the writer to lifetime `'a`. The commented-out parameter is a
    // placeholder for a future `Fallibility` marker type.
    pub phantom: PhantomData<&'a [u8] /*, Fallibility)*/ >,
}
566
#[verus_verify]
impl<'a> VmWriter<'a /* Infallible */ > {
    /// Constructs a [`VmWriter`] from a pointer and a length, which represents
    /// a memory range in kernel space.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The memory region represented by `ptr` and `len` must be valid for writes of `len` bytes
    ///   during the entire lifetime `a`. This means that the underlying pages must remain alive,
    ///   and the kernel must access the memory region using only the APIs provided in this module.
    /// - The range `ptr.vaddr..ptr.vaddr + len` must represent a kernel space memory range.
    /// ## Postconditions
    /// - An infallible [`VmWriter`] will be created with the range `ptr.vaddr..ptr.vaddr + len`.
    /// - The created [`VmWriter`] will have a unique identifier `id`, and its cursor will be
    ///   initialized to `ptr`.
    /// - The created [`VmWriter`] will be associated with a [`VmIoOwner`] that has the same `id`, the
    ///   same memory range, and is marked as kernel space and infallible.
    /// - The memory view of the associated [`VmIoOwner`] will be `None`, indicating that it does not
    ///   have any specific permissions yet.
    /// ## Safety
    ///
    /// `ptr` must be [valid] for writes of `len` bytes during the entire lifetime `a`.
    ///
    /// [valid]: crate::mm::io#safety
    #[verus_spec(r =>
        with
            Ghost(id): Ghost<nat>,
            Tracked(fallible): Tracked<bool>,
        -> owner: Tracked<VmIoOwner<'a>>,
        requires
            !fallible,
            ptr.inv(),
            ptr.range@.start == ptr.vaddr,
            len == ptr.range@.end - ptr.range@.start,
            len == 0 || KERNEL_BASE_VADDR <= ptr.vaddr,
            len == 0 || ptr.vaddr + len <= KERNEL_END_VADDR,
        ensures
            owner@.inv(),
            owner@.inv_with_writer(r),
            owner@.is_fallible == fallible,
            owner@.id == id,
            owner@.is_kernel,
            owner@.mem_view is None,
            r.cursor == ptr,
            r.end.vaddr == ptr.vaddr + len,
            r.cursor.range@ == ptr.range@,
            r.end.range@ == ptr.range@,
    )]
    pub unsafe fn from_kernel_space(ptr: VirtPtr, len: usize) -> Self {
        // Build a placeholder owner over the same range; the memory view is
        // populated later when actual permissions are attached.
        let ghost range = ptr.range@;
        let tracked owner = VmIoOwner {
            id: Ghost(id),
            range: Ghost(range),
            is_fallible: fallible,
            phantom: PhantomData,
            is_kernel: true,
            mem_view: None,
        };

        // The end pointer shares the same ghost range as the cursor.
        let ghost range = ptr.vaddr..(ptr.vaddr + len) as usize;
        let end = VirtPtr { vaddr: ptr.vaddr + len, range: Ghost(range) };

        proof_with!(|= Tracked(owner));
        Self { id: Ghost(id), cursor: ptr, end, phantom: PhantomData }
    }

    /// Converts a PoD value into a [`VmWriter`] that writes to the memory occupied by the PoD value.
    ///
    /// NOTE(review): `val` is taken by value, so it is dropped when this function
    /// returns, while the returned writer still points at its (former) stack
    /// location. Compare [`VmReader::from_pod`], which takes `&mut T`. Confirm
    /// whether this signature is intentional, since fixing it would change the
    /// interface for existing callers.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// None as the Rust's type system guarantees that if `T` is valid, then we can always create a
    /// valid `VmWriter` for it.
    /// ## Postconditions
    /// - If the memory region occupied by `val` is valid for writes, then an infallible [`VmWriter`]
    ///   will be created that points to the memory region of `val`.
    /// - If the memory region occupied by `val` is not valid for writes, then an [`IoError`] will be
    ///   returned.
    ///
    /// [`IoError`]: `Error::IoError`
    #[verus_spec(r =>
        with
            Ghost(id): Ghost<nat>,
        -> owner: Tracked<Result<VmIoOwner<'a>>>,
        ensures
            r is Ok <==> owner@ is Ok,
            r matches Ok(r) ==> {
                &&& owner@ matches Ok(owner) ==> {
                    &&& r.inv()
                    &&& r.avail_spec() == core::mem::size_of::<T>()
                    &&& owner.inv()
                    &&& owner.inv_with_writer(r)
                }
            }
    )]
    pub fn from_pod<T: Pod>(mut val: T) -> Result<VmWriter<'a /* Infallible */ >> {
        proof_decl! {
            let tracked mut perm;
        }

        let (pnt, len) = val.as_bytes_mut();

        // Reject regions outside kernel space, regions that would overflow the
        // address computation, and null pointers (ZSTs may yield `pnt == 0`).
        if len != 0 && (pnt < KERNEL_BASE_VADDR || len >= KERNEL_END_VADDR || pnt > KERNEL_END_VADDR
            - len) || pnt == 0 {
            proof_with!(|= Tracked(Err(Error::IoError)));
            Err(Error::IoError)
        } else {
            let ghost range = pnt..(pnt + len) as usize;
            let vptr = VirtPtr { vaddr: pnt as usize, range: Ghost(range) };
            // SAFETY: the guard above established that the region lies in
            // kernel space; `Tracked(false)` requests an infallible writer.
            let r = unsafe {
                #[verus_spec(with Ghost(id), Tracked(false) => Tracked(perm))]
                VmWriter::from_kernel_space(vptr, len)
            };

            proof_with!(|= Tracked(Ok(perm)));
            Ok(r)
        }
    }
}
685
686impl Clone for VmReader<'_ /* Fallibility */ > {
687 /// [`Clone`] can be implemented for [`VmReader`]
688 /// because it either points to untyped memory or represents immutable references.
689 ///
690 /// Note that we cannot implement [`Clone`] for [`VmWriter`]
691 /// because it can represent mutable references, which must remain exclusive.
692 fn clone(&self) -> Self {
693 Self { id: self.id, cursor: self.cursor, end: self.end, phantom: PhantomData }
694 }
695}
696
#[verus_verify]
impl<'a> VmReader<'a /* Infallible */ > {
    /// Constructs a [`VmReader`] from a pointer and a length, which represents
    /// a memory range in USER space.
    ///
    /// ⚠️ WARNING: Currently not implemented yet.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - `ptr` must satisfy [`VirtPtr::inv`].
    /// ## Postconditions
    /// - The returned [`VmReader`] satisfies its invariant.
    /// - The returned reader is associated with a [`VmIoOwner`] that satisfies both [`VmIoOwner::inv`]
    ///   and [`VmIoOwner::inv_with_reader`].
    /// - The owner has the same range as `ptr`, has no memory view yet, and is marked as user-space
    ///   and infallible.
    #[verifier::external_body]
    #[verus_spec(r =>
        with
            Ghost(id): Ghost<nat>,
        -> owner: Tracked<VmIoOwner<'a>>,
        requires
            ptr.inv(),
        ensures
            r.inv(),
            owner@.id == id,
            owner@.inv(),
            owner@.inv_with_reader(r),
            owner@.range == ptr.range@,
            owner@.mem_view is None,
            !owner@.is_kernel,
            !owner@.is_fallible,
    )]
    pub unsafe fn from_user_space(ptr: VirtPtr) -> Self {
        // SAFETY: The caller must ensure the safety requirements.
        unimplemented!()
    }

    /// Constructs a [`VmReader`] from a pointer and a length, which represents
    /// a memory range in kernel space.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The memory region represented by `ptr` and `len` must be valid for reads of `len` bytes
    ///   during the entire lifetime `a`. This means that the underlying pages must remain alive,
    ///   and the kernel must access the memory region using only the APIs provided in this module.
    /// - The range `ptr.vaddr..ptr.vaddr + len` must represent a kernel space memory range.
    /// ## Postconditions
    /// - An infallible [`VmReader`] will be created with the range `ptr.vaddr..ptr.vaddr + len`.
    /// - The created [`VmReader`] will have a unique identifier `id`, and its cursor will be
    ///   initialized to `ptr`.
    /// - The created [`VmReader`] will be associated with a [`VmIoOwner`] that has the same `id`,
    ///   the same memory range, and is marked as kernel space and infallible.
    /// ## Safety
    ///
    /// `ptr` must be [valid] for reads of `len` bytes during the entire lifetime `a`.
    ///
    /// [valid]: crate::mm::io#safety
    #[verus_spec(r =>
        with
            Ghost(id): Ghost<nat>,
        -> owner: Tracked<VmIoOwner<'a>>,
        requires
            ptr.inv(),
            ptr.range@.start == ptr.vaddr,
            len == ptr.range@.end - ptr.range@.start,
            len == 0 || KERNEL_BASE_VADDR <= ptr.vaddr,
            len == 0 || ptr.vaddr + len <= KERNEL_END_VADDR,
        ensures
            owner@.inv(),
            owner@.inv_with_reader(r),
            r.cursor.vaddr == ptr.vaddr,
            r.end.vaddr == ptr.vaddr + len,
            r.cursor == ptr,
            r.end.range@ == ptr.range@,
            owner@.is_kernel,
            owner@.id == id,
    )]
    pub unsafe fn from_kernel_space(ptr: VirtPtr, len: usize) -> Self {
        // Build a placeholder owner over the same range; the memory view is
        // populated later when actual permissions are attached.
        let tracked owner = VmIoOwner {
            id: Ghost(id),
            range: Ghost(ptr.vaddr..(ptr.vaddr + len) as usize),
            is_fallible: false,
            phantom: PhantomData,
            is_kernel: true,
            // This is left empty as will be populated later.
            mem_view: None,
        };

        // The end pointer shares the same ghost range as the cursor.
        let ghost range = ptr.vaddr..(ptr.vaddr + len) as usize;
        let end = VirtPtr { vaddr: ptr.vaddr + len, range: Ghost(range) };

        proof_with!(|= Tracked(owner));
        Self { id: Ghost(id), cursor: ptr, end, phantom: PhantomData }
    }

    /// Converts a PoD value into a [`VmReader`] that reads from the memory occupied by the PoD value.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// None as the Rust's type system guarantees that if `&mut T` is valid, then we can always
    /// create a valid [`VmReader`] for it.
    /// ## Postconditions
    /// - If the memory region occupied by `val` is valid for reads, then an infallible [`VmReader`]
    ///   will be created that points to the memory region of `val`.
    /// - If the memory region occupied by `val` is not valid for reads, then an [`IoError`] will be
    ///   returned.
    ///
    /// [`IoError`]: `Error::IoError`
    #[verus_spec(r =>
        with
            Ghost(id): Ghost<nat>,
        -> owner: Tracked<Result<VmIoOwner<'a>>>,
        ensures
            r is Ok <==> owner@ is Ok,
            r matches Ok(r) ==> {
                &&& owner@ matches Ok(owner) ==> {
                    &&& r.inv()
                    &&& r.remain_spec() == core::mem::size_of::<T>()
                    &&& owner.inv()
                    &&& owner.inv_with_reader(r)
                }
            }
    )]
    pub fn from_pod<T: Pod>(val: &mut T) -> Result<VmReader<'a /* Infallible */ >> {
        proof_decl! {
            let tracked mut perm;
        }

        let (pnt, len) = val.as_bytes_mut();

        // Reject regions outside kernel space, regions that would overflow the
        // address computation, and null pointers (ZSTs may yield `pnt == 0`).
        if len != 0 && (pnt < KERNEL_BASE_VADDR || len >= KERNEL_END_VADDR || pnt > KERNEL_END_VADDR
            - len) || pnt == 0 {
            proof_with!(|= Tracked(Err(Error::IoError)));
            Err(Error::IoError)
        } else {
            let ghost range = pnt..(pnt + len) as usize;
            let vptr = VirtPtr { vaddr: pnt, range: Ghost(range) };
            // SAFETY: the guard above established that the region lies in
            // kernel space and stays alive for `'a` via the `&mut T` borrow.
            let r = unsafe {
                #[verus_spec(with Ghost(id) => Tracked(perm))]
                VmReader::from_kernel_space(vptr, len)
            };

            proof {
                assert(r.inv());
                assert(perm.inv());
            }

            proof_with!(|= Tracked(Ok(perm)));
            Ok(r)
        }
    }
}
850
#[verus_verify]
impl<'a> TryFrom<&'a [u8]> for VmReader<'a /* Infallible */ > {
    type Error = Error;

    /// Constructs an infallible [`VmReader`] covering exactly the bytes of `slice`.
    ///
    /// Returns [`Error::IoError`] if the slice does not lie entirely within the kernel
    /// address range or if its base address is null.
    #[verus_spec()]
    fn try_from(slice: &'a [u8]) -> Result<Self> {
        proof_decl! {
            // Tracked permission returned by `from_kernel_space`; unused by this infallible API.
            let tracked mut perm;
        }

        let addr = slice.as_ptr() as usize;

        // Reject ranges outside the kernel address space (or with a null base address); the
        // subtraction form avoids overflow in `addr + slice.len()`.
        if slice.len() != 0 && (addr < KERNEL_BASE_VADDR || slice.len() >= KERNEL_END_VADDR || addr
            > KERNEL_END_VADDR - slice.len()) || addr == 0 {
            return Err(Error::IoError);
        }
        // SAFETY:
        // - The memory range points to typed memory.
        // - The validity requirements for read accesses are met because the pointer is converted
        //   from an immutable reference that outlives the lifetime `'a`.
        // - The type, i.e., the `u8` slice, is plain-old-data.

        let ghost range = addr..(addr + slice.len()) as usize;
        let vptr = VirtPtr { vaddr: addr, range: Ghost(range) };

        Ok(
            unsafe {
                // The ghost id is irrelevant for this reader, hence `arbitrary()`.
                #[verus_spec(with Ghost(arbitrary()) => Tracked(perm))]
                Self::from_kernel_space(vptr, slice.len())
            },
        )
    }
}
884
impl<'a> TryFromSpecImpl<&'a [u8]> for VmReader<'a> {
    open spec fn obeys_try_from_spec() -> bool {
        true
    }

    /// Specification-level mirror of `<VmReader as TryFrom<&[u8]>>::try_from`: the same
    /// kernel-address-range check, producing a reader whose cursor and end span exactly the
    /// slice's byte range on success.
    open spec fn try_from_spec(slice: &'a [u8]) -> Result<Self> {
        let addr = slice.as_ptr() as usize;
        let len = slice.len();

        if len != 0 && (addr < KERNEL_BASE_VADDR || len >= KERNEL_END_VADDR || addr
            > KERNEL_END_VADDR - slice.len()) || addr == 0 {
            Err(Error::IoError)
        } else {
            Ok(
                Self {
                    // Id is not important here.
                    id: Ghost(arbitrary()),
                    cursor: VirtPtr { vaddr: addr, range: Ghost(addr..(addr + len) as usize) },
                    end: VirtPtr {
                        vaddr: (addr + len) as usize,
                        range: Ghost(addr..(addr + len) as usize),
                    },
                    phantom: PhantomData,
                },
            )
        }
    }
}
913
// Perhaps we can implement `tryfrom` instead.
// This trait method should be discarded as we do not want to make VmWriter <N> ?
#[verus_verify]
impl<'a> TryFrom<&'a [u8]> for VmWriter<'a /* Infallible */ > {
    type Error = Error;

    // fn try_from(slice: ArrayPtr<u8, N>, Tracked(owner))??
    /// Constructs an infallible [`VmWriter`] covering exactly the bytes of `slice`.
    ///
    /// Returns [`Error::IoError`] if the slice does not lie entirely within the kernel
    /// address range or if its base address is null.
    #[verus_spec()]
    fn try_from(slice: &'a [u8] /* length... */ ) -> Result<Self> {
        proof_decl! {
            // Tracked permission returned by `from_kernel_space`; unused by this infallible API.
            let tracked mut perm;
        }

        let addr = slice.as_ptr() as usize;

        // Reject ranges outside the kernel address space (or with a null base address).
        if slice.len() != 0 && (addr < KERNEL_BASE_VADDR || slice.len() >= KERNEL_END_VADDR || addr
            > KERNEL_END_VADDR - slice.len()) || addr == 0 {
            return Err(Error::IoError);
        }
        // SAFETY:
        // - The memory range points to typed memory.
        // - The validity requirements for write accesses are met because the pointer is converted
        //   from a mutable reference that outlives the lifetime `'a`.
        // - The type, i.e., the `u8` slice, is plain-old-data.
        //
        // NOTE(review): the input is actually an *immutable* `&'a [u8]`, not the mutable
        // reference the justification above claims. Creating a writer over bytes reachable
        // through a shared reference is unsound in general — confirm whether this impl should
        // take `&'a mut [u8]` instead.

        let ghost range = addr..(addr + slice.len()) as usize;
        let vptr = VirtPtr { vaddr: addr, range: Ghost(range) };

        proof {
            assert(vptr.inv());
        }

        Ok(
            unsafe {
                // The ghost id is irrelevant; `Tracked(false)` marks the writer as infallible.
                #[verus_spec(with Ghost(arbitrary()), Tracked(false) => Tracked(perm))]
                Self::from_kernel_space(vptr, slice.len())
            },
        )
    }
}
954
impl<'a> TryFromSpecImpl<&'a [u8]> for VmWriter<'a> {
    open spec fn obeys_try_from_spec() -> bool {
        true
    }

    /// Specification-level mirror of `<VmWriter as TryFrom<&[u8]>>::try_from`: the same
    /// kernel-address-range check, producing a writer whose cursor and end span exactly the
    /// slice's byte range on success.
    open spec fn try_from_spec(slice: &'a [u8]) -> Result<Self> {
        let addr = slice.as_ptr() as usize;
        let len = slice.len();

        if len != 0 && (addr < KERNEL_BASE_VADDR || len >= KERNEL_END_VADDR || addr
            > KERNEL_END_VADDR - slice.len()) || addr == 0 {
            Err(Error::IoError)
        } else {
            Ok(
                Self {
                    // Id is not important here.
                    id: Ghost(arbitrary()),
                    cursor: VirtPtr { vaddr: addr, range: Ghost(addr..(addr + len) as usize) },
                    end: VirtPtr {
                        vaddr: (addr + len) as usize,
                        range: Ghost(addr..(addr + len) as usize),
                    },
                    phantom: PhantomData,
                },
            )
        }
    }
}
982
/// Shorthand result type used throughout this module, with [`Error`] as the error type.
type Result<T> = core::result::Result<T, Error>;
984
/// A trait that enables reading/writing data from/to a VM object,
/// e.g., [`USegment`], [`Vec<UFrame>`] and [`UFrame`].
///
/// # Concurrency
///
/// The methods may be executed by multiple concurrent reader and writer
/// threads. In this case, if the results of concurrent reads or writes
/// desire predictability or atomicity, the users should add extra mechanism
/// for such properties.
///
/// [`USegment`]: crate::mm::USegment
/// [`UFrame`]: crate::mm::UFrame
///
/// Note: In this trait we follow the standard of `vstd` trait that allows precondition and
/// postcondition overriding by introducing `obeys_`, `_requires`, and `_ensures` spec functions.
///
/// `P` is the type of the permission/ownership token used to track the state of the VM object.
pub trait VmIo<P: Sized>: Send + Sync + Sized {
    // spec fn reader_inv_with_owner(self, owner: VmIoOwner<'_>) -> bool;
    /// If this returns true then the `requires` clause of `read` will be active.
    spec fn obeys_vmio_read_requires() -> bool;

    /// If this returns true then the `ensures` clause of `read` will be active.
    spec fn obeys_vmio_read_ensures() -> bool;

    /// If this returns true then the `requires` clause of `write` will be active.
    spec fn obeys_vmio_write_requires() -> bool;

    /// If this returns true then the `ensures` clause of `write` will be active.
    spec fn obeys_vmio_write_ensures() -> bool;

    /// Checks whether the preconditions for `read` are met.
    spec fn vmio_read_requires(self, owner: P, offset: usize) -> bool;

    /// Checks whether the postconditions for `read` are met.
    spec fn vmio_read_ensures(self, owner: P, offset: usize, new_owner: P, r: Result<()>) -> bool;

    /// Checks whether the preconditions for `write` are met.
    spec fn vmio_write_requires(self, owner: P, offset: usize) -> bool;

    /// Checks whether the postconditions for `write` are met.
    spec fn vmio_write_ensures(self, owner: P, offset: usize, new_owner: P, r: Result<()>) -> bool;

    /// Reads `N` elements of type `T` at `offset` from the VM object into `slice`.
    ///
    /// NOTE(review): `slice_owner` tracks `PointsToArray<u8, N>` while `slice` holds `T` —
    /// confirm whether the token should be `PointsToArray<T, N>` (as in `write_slice`).
    fn read_slice<T: Pod, const N: usize>(
        &self,
        offset: usize,
        slice: ArrayPtr<T, N>,
        Tracked(slice_owner): Tracked<&mut PointsToArray<u8, N>>,
        Tracked(owner): Tracked<&mut P>,
    ) -> (r: Result<()>);

    /// Writes `N` bytes from `bytes` into the VM object at `offset`.
    fn write_bytes<const N: usize>(
        &self,
        offset: usize,
        bytes: ArrayPtr<u8, N>,
        Tracked(bytes_owner): Tracked<&mut PointsToArray<u8, N>>,
        Tracked(owner): Tracked<&mut P>,
    ) -> (r: Result<()>)
    // TODO: the `requires`/`ensures` clauses below are currently disabled; re-enable once
    // implementors can discharge them.
    // requires
    //     Self::obeys_vmio_write_requires() ==> Self::vmio_write_requires(
    //         *self,
    //         *old(owner),
    //         offset,
    //     ),
    // ensures
    //     Self::obeys_vmio_write_ensures() ==> Self::vmio_write_ensures(
    //         *self,
    //         *old(owner),
    //         offset,
    //         *owner,
    //         r,
    //     ),
    ;

    /// Writes a single `Pod` value into the VM object at `offset`.
    fn write_val<T: Pod>(&self, offset: usize, val: T, Tracked(owner): Tracked<&mut P>) -> (r:
        Result<()>)
    // TODO: the `requires`/`ensures` clauses below are currently disabled; re-enable once
    // implementors can discharge them.
    // requires
    //     Self::obeys_vmio_write_requires() ==> Self::vmio_write_requires(
    //         *self,
    //         *old(owner),
    //         offset,
    //     ),
    // ensures
    //     Self::obeys_vmio_write_ensures() ==> Self::vmio_write_ensures(
    //         *self,
    //         *old(owner),
    //         offset,
    //         *owner,
    //         r,
    //     ),
    ;

    /// Writes `N` elements of type `T` from `slice` into the VM object at `offset`.
    fn write_slice<T: Pod, const N: usize>(
        &self,
        offset: usize,
        slice: ArrayPtr<T, N>,
        Tracked(slice_owner): Tracked<&mut PointsToArray<T, N>>,
        Tracked(owner): Tracked<&mut P>,
    ) -> (r: Result<()>)
        requires
            Self::obeys_vmio_write_requires() ==> Self::vmio_write_requires(
                *self,
                *old(owner),
                offset,
            ),
        ensures
            Self::obeys_vmio_write_ensures() ==> Self::vmio_write_ensures(
                *self,
                *old(owner),
                offset,
                *owner,
                r,
            ),
    ;
}
1100
/// A trait that enables reading/writing data from/to a VM object using one non-tearing memory
/// load/store.
///
/// See also [`VmIo`], which enables reading/writing data from/to a VM object without the guarantee
/// of using one non-tearing memory load/store.
pub trait VmIoOnce: Sized {
    /// If this returns true then the `requires` clause of `read_once` will be active.
    spec fn obeys_vmio_once_read_requires() -> bool;

    /// If this returns true then the `requires` clause of `write_once` will be active.
    spec fn obeys_vmio_once_write_requires() -> bool;

    /// If this returns true then the `ensures` clause of `read_once` will be active.
    spec fn obeys_vmio_once_read_ensures() -> bool;

    /// If this returns true then the `ensures` clause of `write_once` will be active.
    spec fn obeys_vmio_once_write_ensures() -> bool;

    /// Reads a value of the `PodOnce` type at the specified offset using one non-tearing memory
    /// load.
    ///
    /// Except that the offset is specified explicitly, the semantics of this method is the same as
    /// [`VmReader::read_once`].
    fn read_once<T: PodOnce>(&self, offset: usize) -> Result<T>;

    /// Writes a value of the `PodOnce` type at the specified offset using one non-tearing memory
    /// store.
    ///
    /// Except that the offset is specified explicitly, the semantics of this method is the same as
    /// [`VmWriter::write_once`].
    fn write_once<T: PodOnce>(&self, offset: usize, new_val: &T) -> Result<()>;
}
1129
#[verus_verify]
impl VmReader<'_> {
    /// Specification-level count of remaining readable bytes: the distance between the
    /// current cursor and the end of the readable range.
    pub open spec fn remain_spec(&self) -> usize {
        (self.end.vaddr - self.cursor.vaddr) as usize
    }

    /// Returns the number of remaining bytes that can be read.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - `self` must satisfy its invariant.
    /// ## Postconditions
    /// - The returned value equals [`Self::remain_spec`].
    #[inline]
    #[verus_spec(r =>
        requires
            self.inv(),
        ensures
            r == self.remain_spec(),
    )]
    #[verifier::when_used_as_spec(remain_spec)]
    pub fn remain(&self) -> usize {
        self.end.vaddr - self.cursor.vaddr
    }

    /// Advances the cursor by `len` bytes.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - `self` must satisfy its invariant.
    /// - `len` must not exceed the remaining readable bytes.
    /// ## Postconditions
    /// - `self` still satisfies its invariant.
    /// - The cursor advances by `len`.
    /// - The remaining readable bytes decrease by `len`.
    /// - The reader identity and end cursor are preserved.
    #[inline]
    #[verus_spec(
        requires
            old(self).inv(),
            len <= old(self).remain_spec(),
        ensures
            self.inv(),
            self.cursor.vaddr == old(self).cursor.vaddr + len,
            self.remain_spec() == old(self).remain_spec() - len,
            self.id == old(self).id,
            self.end == old(self).end,
    )]
    pub fn advance(&mut self, len: usize) {
        self.cursor.vaddr = self.cursor.vaddr + len;
    }

    /// Reads data from `self` and writes it into the provided `writer`.
    ///
    /// This function acts as the source side of a transfer. It copies data from
    /// the current instance (`self`) into the destination `writer`, up to the limit
    /// of available data in `self` or available space in `writer` (whichever is smaller).
    ///
    /// # Logic
    ///
    /// 1. Calculates the copy length: `min(self.remaining_data, writer.available_space)`.
    /// 2. Copies bytes from `self`'s internal buffer to `writer`'s buffer.
    /// 3. Advances the cursors of both `self` and `writer`.
    ///
    /// # Arguments
    ///
    /// * `writer` - The destination `VmWriter` where the data will be copied to.
    ///
    /// # Returns
    ///
    /// The number of bytes actually transferred.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The reader, writer, and both associated owners must satisfy their invariants.
    /// - The owners must match the given reader and writer.
    /// - The writer owner must carry a write memory view.
    /// - The source and destination ranges must not overlap.
    /// - The reader owner must provide initialized readable memory for the readable range.
    /// ## Postconditions
    /// - The reader, writer, and both owners still satisfy their invariants.
    /// - The owners still match the updated reader and writer.
    /// - The returned byte count equals the minimum of readable bytes and writable bytes.
    /// - Both cursors advance by exactly the returned byte count.
    #[verus_spec(r =>
        with
            Tracked(owner_r): Tracked<&mut VmIoOwner<'_>>,
            Tracked(owner_w): Tracked<&mut VmIoOwner<'_>>,
        requires
            old(self).inv(),
            old(writer).inv(),
            old(owner_r).inv(),
            old(owner_w).inv(),
            old(owner_r).inv_with_reader(*old(self)),
            old(owner_w).inv_with_writer(*old(writer)),
            old(owner_w).mem_view matches Some(VmIoMemView::WriteView(_)),
            // Non-overlapping requirements.
            old(writer).cursor.range@.start >= old(self).cursor.range@.end
                || old(self).cursor.range@.start >= old(writer).cursor.range@.end,
            old(owner_r).mem_view matches Some(VmIoMemView::ReadView(mem_src)) &&
            forall|i: usize|
                #![trigger mem_src.addr_transl(i)]
                old(self).cursor.vaddr <= i < old(self).cursor.vaddr + old(self).remain_spec() ==> {
                    &&& mem_src.addr_transl(i) is Some
                    &&& mem_src.memory.contains_key(mem_src.addr_transl(i).unwrap().0)
                    &&& mem_src.memory[mem_src.addr_transl(i).unwrap().0].contents[mem_src.addr_transl(i).unwrap().1 as int] is Init
                },
        ensures
            self.inv(),
            writer.inv(),
            owner_r.inv(),
            owner_w.inv(),
            owner_r.inv_with_reader(*self),
            owner_w.inv_with_writer(*writer),
            r == vstd::math::min(old(self).remain_spec() as int, old(writer).avail_spec() as int),
            self.remain_spec() == old(self).remain_spec() - r as usize,
            self.cursor.vaddr == old(self).cursor.vaddr + r as usize,
            writer.avail_spec() == old(writer).avail_spec() - r as usize,
            writer.cursor.vaddr == old(writer).cursor.vaddr + r as usize,
    )]
    pub fn read(&mut self, writer: &mut VmWriter) -> usize {
        // The copy length is the smaller of what `self` can still read and what `writer`
        // can still accept.
        let mut copy_len = if self.remain() < writer.avail() {
            self.remain()
        } else {
            writer.avail()
        };

        if copy_len == 0 {
            return 0;
        }
        // Temporarily take the read/write memory views out of the owners; they are put back
        // below after the copy. The `proof_from_false` arms are unreachable because the
        // preconditions pin the `mem_view` variants.
        let tracked mv_r = match owner_r.mem_view.tracked_take() {
            VmIoMemView::ReadView(mv) => mv,
            _ => { proof_from_false() },
        };
        let tracked mut mv_w = match owner_w.mem_view.tracked_take() {
            VmIoMemView::WriteView(mv) => mv,
            _ => { proof_from_false() },
        };
        // Snapshot of the write view before the copy, used below to show mappings are unchanged.
        let ghost mv_w_pre = mv_w;

        // Now the `memcpy` below is a safe API, since we have the tracked permissions
        // for both reader and writer to guarantee that the memory accesses are valid.
        //
        // This is equivalent to: memcpy(writer.cursor.vaddr, self.cursor.vaddr, copy_len);
        VirtPtr::copy_nonoverlapping(
            &self.cursor,
            &writer.cursor,
            Tracked(mv_r),
            Tracked(&mut mv_w),
            copy_len,
        );

        self.advance(copy_len);
        writer.advance(copy_len);

        proof {
            // NOTE(review): `mv_w0` appears unused — confirm whether it can be removed.
            let ghost mv_w0 = mv_w;
            owner_w.mem_view = Some(VmIoMemView::WriteView(mv_w));
            owner_r.mem_view = Some(VmIoMemView::ReadView(mv_r));

            // The copy leaves the writer's address mappings untouched, so address translation
            // still succeeds over the owner's whole range.
            assert forall|va|
                owner_w.range@.start <= va < owner_w.range@.end implies mv_w.addr_transl(
                va,
            ) is Some by {
                assert(mv_w.mappings == mv_w_pre.mappings);
                assert(mv_w.addr_transl(va) == mv_w_pre.addr_transl(va));
            }

            owner_w.advance(copy_len);
            owner_r.advance(copy_len);
        }

        copy_len
    }

    /// Reads a value of `Pod` type.
    ///
    /// If the length of the `Pod` type exceeds `self.remain()`,
    /// this method will return `Err`.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The reader and its owner must satisfy their invariants.
    /// - The owner must match this reader and carry a read memory view.
    /// - The readable range must translate to initialized bytes in the read view.
    /// ## Postconditions
    /// - The reader and owner still satisfy their invariants.
    /// - On success, the cursor advances by `size_of::<T>()`.
    /// - On error, the reader state is unchanged.
    #[verus_spec(r =>
        with
            Tracked(owner): Tracked<&mut VmIoOwner<'_>>,
        requires
            old(self).inv(),
            old(owner).inv(),
            old(owner).inv_with_reader(*old(self)),
            old(owner).mem_view matches Some(VmIoMemView::ReadView(_)),
            old(owner).mem_view matches Some(VmIoMemView::ReadView(mem_src)) ==> {
                forall|i: usize|
                    #![trigger mem_src.addr_transl(i)]
                    old(self).cursor.vaddr <= i < old(self).cursor.vaddr + old(self).remain_spec() ==> {
                        &&& mem_src.addr_transl(i) is Some
                        &&& mem_src.memory.contains_key(mem_src.addr_transl(i).unwrap().0)
                        &&& mem_src.memory[mem_src.addr_transl(i).unwrap().0].contents[mem_src.addr_transl(i).unwrap().1 as int] is Init
                    }
            },
        ensures
            self.inv(),
            owner.inv(),
            owner.inv_with_reader(*self),
            match r {
                Ok(_) => {
                    &&& self.remain_spec() == old(self).remain_spec() - core::mem::size_of::<T>()
                    &&& self.cursor.vaddr == old(self).cursor.vaddr + core::mem::size_of::<T>()
                },
                Err(_) => {
                    *old(self) == *self
                },
            }
    )]
    pub fn read_val<T: Pod>(&mut self) -> Result<T> {
        // Not enough bytes left to materialize a `T`.
        if self.remain() < core::mem::size_of::<T>() {
            return Err(Error::InvalidArgs);
        }
        let len = core::mem::size_of::<T>();
        // Borrow the read view from the owner to justify the read below.
        let tracked mem_src = owner.tracked_read_view_unwrap();

        proof_with!(Tracked(mem_src));
        let v = read_pod_from_view(self.cursor);

        self.advance(len);

        proof {
            // Keep the owner's ghost cursor in sync with the reader's.
            owner.advance(len);
        }

        Ok(v)
    }

    /// Reads a value of the `PodOnce` type using one non-tearing memory load.
    ///
    /// If the length of the `PodOnce` type exceeds `self.remain()`, this method will return `Err`.
    ///
    /// This method will not compile if the `Pod` type is too large for the current architecture
    /// and the operation must be torn into multiple memory loads.
    ///
    /// # Panics
    ///
    /// This method will panic if the current position of the reader does not meet the alignment
    /// requirements of type `T`.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The reader and its owner must satisfy their invariants.
    /// - The owner must match this reader and carry a read memory view.
    /// - The readable range must translate to initialized bytes in the read view.
    /// - The current cursor must satisfy the alignment requirements of `T`.
    /// ## Postconditions
    /// - The reader and owner still satisfy their invariants.
    /// - On success, the cursor advances by `size_of::<T>()`.
    /// - On error, the reader state is unchanged.
    #[verus_spec(r =>
        with
            Tracked(owner): Tracked<&mut VmIoOwner<'_>>,
        requires
            old(self).inv(),
            old(owner).inv(),
            old(owner).inv_with_reader(*old(self)),
            old(owner).mem_view matches Some(VmIoMemView::ReadView(_)),
            old(self).cursor.vaddr % core::mem::align_of::<T>() == 0,
            old(owner).mem_view matches Some(VmIoMemView::ReadView(mem_src)) ==> {
                forall|i: usize|
                    #![trigger mem_src.addr_transl(i)]
                    old(self).cursor.vaddr <= i < old(self).cursor.vaddr + core::mem::size_of::<T>() ==> {
                        &&& mem_src.addr_transl(i) is Some
                        &&& mem_src.memory.contains_key(mem_src.addr_transl(i).unwrap().0)
                        &&& mem_src.memory[mem_src.addr_transl(i).unwrap().0].contents[mem_src.addr_transl(i).unwrap().1 as int] is Init
                    }
            },
        ensures
            self.inv(),
            owner.inv(),
            owner.inv_with_reader(*self),
            match r {
                Ok(_) => {
                    &&& self.remain_spec() == old(self).remain_spec() - core::mem::size_of::<T>()
                    &&& self.cursor.vaddr == old(self).cursor.vaddr + core::mem::size_of::<T>()
                },
                Err(_) => {
                    *old(self) == *self
                },
            }
    )]
    pub fn read_val_once<T: PodOnce>(&mut self) -> Result<T> {
        if core::mem::size_of::<T>() > self.remain() {
            return Err(Error::InvalidArgs);
        }
        // SAFETY: We have checked that the number of bytes remaining is at least the size of `T`
        // and that the cursor is properly aligned with respect to the type `T` (the alignment is
        // a verified precondition). All other safety requirements are the same as for
        // `Self::read`.

        let tracked mem_src = owner.tracked_read_view_unwrap();
        proof_with!(Tracked(mem_src));
        let v = read_once_from_view::<T>(self.cursor);
        self.advance(core::mem::size_of::<T>());

        proof {
            // Keep the owner's ghost cursor in sync with the reader's.
            owner.advance(core::mem::size_of::<T>());
        }

        Ok(v)
    }
}
1443
#[verus_verify]
impl<'a> VmWriter<'a> {
    /// Specification-level count of remaining writable bytes: the distance between the
    /// current cursor and the end of the writable range.
    pub open spec fn avail_spec(&self) -> usize {
        (self.end.vaddr - self.cursor.vaddr) as usize
    }

    /// Constructs a [`VmWriter`] from a pointer, which represents a memory range in USER space.
    ///
    /// ⚠️ WARNING: Currently not implemented yet.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - `ptr` must satisfy [`VirtPtr::inv`].
    /// ## Postconditions
    /// - The returned [`VmWriter`] satisfies its invariant.
    /// - The returned writer is associated with a [`VmIoOwner`] that satisfies both [`VmIoOwner::inv`]
    ///   and [`VmIoOwner::inv_with_writer`].
    /// - The owner has the same range as `ptr`, has no memory view yet, and is marked as user-space
    ///   and infallible.
    #[verifier::external_body]
    #[verus_spec(r =>
        with
            Ghost(id): Ghost<nat>,
            -> owner: Tracked<VmIoOwner<'a>>,
        requires
            ptr.inv(),
        ensures
            r.inv(),
            owner@.id == id,
            owner@.inv(),
            owner@.inv_with_writer(r),
            owner@.range == ptr.range@,
            owner@.mem_view is None,
            !owner@.is_kernel,
            !owner@.is_fallible,
    )]
    pub unsafe fn from_user_space(ptr: VirtPtr) -> Self {
        // SAFETY: The caller must ensure the safety requirements.
        unimplemented!()
    }

    /// Returns the number of available bytes that can be written.
    ///
    /// This has the same implementation as [`VmReader::remain`] but semantically
    /// they are different.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - `self` must satisfy its invariant.
    /// ## Postconditions
    /// - The returned value equals [`Self::avail_spec`].
    #[inline]
    #[verus_spec(r =>
        requires
            self.inv(),
        ensures
            r == self.avail_spec(),
    )]
    #[verifier::when_used_as_spec(avail_spec)]
    pub fn avail(&self) -> usize {
        self.end.vaddr - self.cursor.vaddr
    }

    /// Advances the cursor by `len` bytes.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - `self` must satisfy its invariant.
    /// - `len` must not exceed the remaining writable bytes.
    /// ## Postconditions
    /// - `self` still satisfies its invariant.
    /// - The cursor advances by `len`.
    /// - The remaining writable bytes decrease by `len`.
    /// - The writer identity and end cursor are preserved.
    #[inline]
    #[verus_spec(
        requires
            old(self).inv(),
            len <= old(self).avail_spec(),
        ensures
            self.inv(),
            self.avail_spec() == old(self).avail_spec() - len,
            self.cursor.vaddr == old(self).cursor.vaddr + len,
            self.cursor.range@ == old(self).cursor.range@,
            self.id == old(self).id,
            self.end == old(self).end,
            // NOTE(review): this clause duplicates the range-preservation clause above.
            self.cursor.range@ == old(self).cursor.range@,
    )]
    pub fn advance(&mut self, len: usize) {
        self.cursor.vaddr = self.cursor.vaddr + len;
    }

    /// Writes data into `self` by reading from the provided `reader`.
    ///
    /// This function treats `self` as the destination buffer. It pulls data *from*
    /// the source `reader` and writes it into the current instance.
    ///
    /// # Arguments
    ///
    /// * `reader` - The source `VmReader` to read data from.
    ///
    /// # Returns
    ///
    /// Returns the number of bytes written to `self` (which is equal to the number of bytes read from `reader`).
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The writer, reader, and both associated owners must satisfy their invariants.
    /// - The owners must match the given writer and reader.
    /// - The writer owner must carry a write memory view.
    /// - The source and destination ranges must not overlap.
    /// - The reader owner must provide initialized readable memory for the readable range.
    /// ## Postconditions
    /// - The writer, reader, and both owners still satisfy their invariants.
    /// - The owners still match the updated writer and reader.
    /// - The returned byte count equals the minimum of writable bytes and readable bytes.
    /// - Both cursors advance by exactly the returned byte count.
    #[inline]
    #[verus_spec(r =>
        with
            Tracked(owner_w): Tracked<&mut VmIoOwner<'_>>,
            Tracked(owner_r): Tracked<&mut VmIoOwner<'_>>,
        requires
            old(self).inv(),
            old(reader).inv(),
            old(owner_w).inv(),
            old(owner_r).inv(),
            old(owner_w).inv_with_writer(*old(self)),
            old(owner_r).inv_with_reader(*old(reader)),
            old(owner_w).mem_view matches Some(VmIoMemView::WriteView(_)),
            // Non-overlapping requirements.
            old(self).cursor.range@.start >= old(reader).cursor.range@.end
                || old(reader).cursor.range@.start >= old(self).cursor.range@.end,
            old(owner_r).mem_view matches Some(VmIoMemView::ReadView(mem_src)) &&
            forall|i: usize|
                #![trigger mem_src.addr_transl(i)]
                old(reader).cursor.vaddr <= i < old(reader).cursor.vaddr + old(reader).remain_spec() ==> {
                    &&& mem_src.addr_transl(i) is Some
                    &&& mem_src.memory.contains_key(mem_src.addr_transl(i).unwrap().0)
                    &&& mem_src.memory[mem_src.addr_transl(i).unwrap().0].contents[mem_src.addr_transl(i).unwrap().1 as int] is Init
                },
        ensures
            self.inv(),
            reader.inv(),
            owner_w.inv(),
            owner_r.inv(),
            owner_w.inv_with_writer(*self),
            owner_r.inv_with_reader(*reader),
            r == vstd::math::min(old(self).avail_spec() as int, old(reader).remain_spec() as int),
            self.avail_spec() == old(self).avail_spec() - r as usize,
            self.cursor.vaddr == old(self).cursor.vaddr + r as usize,
            reader.remain_spec() == old(reader).remain_spec() - r as usize,
            reader.cursor.vaddr == old(reader).cursor.vaddr + r as usize,
    )]
    pub fn write(&mut self, reader: &mut VmReader) -> usize {
        // Delegate to the reader's transfer routine with the owner roles swapped.
        proof_with!(Tracked(owner_r), Tracked(owner_w));
        reader.read(self)
    }

    /// Writes a value of `Pod` type.
    ///
    /// If the length of the `Pod` type exceeds `self.avail()`,
    /// this method will return `Err`.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The writer and its owner must satisfy their invariants.
    /// - The owner must match this writer and carry a memory view.
    /// ## Postconditions
    /// - The writer and owner still satisfy their invariants.
    /// - The owner parameters tracked by [`VmIoOwner::params_eq`] are preserved.
    ///   NOTE(review): this `params_eq` preservation is not stated in the `ensures` clause
    ///   below — confirm.
    /// - On success, the cursor advances by `size_of::<T>()`.
    /// - On error, the writer state is unchanged.
    #[verifier::external_body]
    #[verus_spec(r =>
        with
            Ghost(id): Ghost<nat>,
            Tracked(owner_w): Tracked<&mut VmIoOwner<'_>>,
            Tracked(memview_r): Tracked<&MemView>,
        requires
            old(self).inv(),
            old(owner_w).inv(),
            old(owner_w).inv_with_writer(*old(self)),
            old(owner_w).mem_view is Some,
        ensures
            self.inv(),
            owner_w.inv(),
            owner_w.inv_with_writer(*self),
            match r {
                Ok(_) => {
                    &&& self.avail_spec() == old(self).avail_spec() - core::mem::size_of::<T>()
                    &&& self.cursor.vaddr == old(self).cursor.vaddr + core::mem::size_of::<T>()
                },
                Err(_) => {
                    *old(self) == *self
                },
            }
    )]
    pub fn write_val<T: Pod>(&mut self, val: &mut T) -> Result<()> {
        if self.avail() < core::mem::size_of::<T>() {
            return Err(Error::InvalidArgs);
        }
        // Wrap the PoD value in a temporary reader over its own bytes.
        proof_with!(Ghost(id) => Tracked(owner_r));
        let mut reader = VmReader::from_pod(val)?;

        let tracked mut owner_r = owner_r.tracked_unwrap();

        // The transferred length equals `size_of::<T>()` because availability was checked above,
        // so the returned count can be ignored.
        proof_with!(Tracked(owner_w), Tracked(&mut owner_r));
        self.write(&mut reader);

        Ok(())
    }

    /// Performs the raw non-tearing store for [`Self::write_once`].
    ///
    /// NOTE(review): `write_volatile` requires `cursor` to be properly aligned for `T` and
    /// valid for writes, yet neither is captured by the `requires` clause and this body is
    /// `external_body` — confirm that all callers establish alignment.
    #[verifier::external_body]
    #[verus_spec(
        requires
            self.inv(),
            core::mem::size_of::<T>() <= self.avail_spec(),
    )]
    fn write_once_inner<T: PodOnce>(&self, new_val: &mut T) {
        let cursor = self.cursor.vaddr as *mut T;
        unsafe { cursor.write_volatile(*new_val) };
    }

    /// Writes a value of the `PodOnce` type using one non-tearing memory store.
    ///
    /// If the length of the `PodOnce` type exceeds `self.avail()`, this method will return `Err`.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The writer and its owner must satisfy their invariants.
    /// - The owner must match this writer and carry a memory view.
    /// ## Postconditions
    /// - The writer and owner still satisfy their invariants.
    /// - On success, the cursor advances by `size_of::<T>()`.
    /// - On error, the writer state is unchanged.
    #[verus_spec(r =>
        with
            Tracked(owner_w): Tracked<&mut VmIoOwner<'_>>,
        requires
            old(self).inv(),
            old(owner_w).mem_view is Some,
            old(owner_w).inv(),
            old(owner_w).inv_with_writer(*old(self)),
        ensures
            self.inv(),
            owner_w.inv(),
            owner_w.inv_with_writer(*self),
            match r {
                Ok(_) => {
                    &&& self.avail_spec() == old(self).avail_spec() - core::mem::size_of::<T>()
                    &&& self.cursor.vaddr == old(self).cursor.vaddr + core::mem::size_of::<T>()
                },
                Err(_) => {
                    *old(self) == *self
                },
            }
    )]
    pub fn write_once<T: PodOnce>(&mut self, new_val: &mut T) -> Result<()> {
        if core::mem::size_of::<T>() > self.avail() {
            return Err(Error::InvalidArgs);
        }
        // SAFETY: We have checked that the number of bytes available is at least the size of `T`.
        // All other safety requirements are the same as for `Self::write`.
        //
        // NOTE(review): unlike `VmReader::read_val_once`, neither this method's spec nor its
        // body checks that the cursor is aligned for `T`, yet `write_once_inner` performs a
        // `write_volatile` that requires alignment — confirm.

        self.write_once_inner::<T>(new_val);
        self.advance(core::mem::size_of::<T>());

        proof {
            // Keep the owner's ghost cursor in sync with the writer's.
            owner_w.advance(core::mem::size_of::<T>());
        }

        Ok(())
    }
}
1720
1721} // verus!