ostd/specs/mm/vm_space.rs
1use vstd::pervasive::proof_from_false;
2use vstd::prelude::*;
3
4use vstd_extra::ownership::*;
5
6use crate::mm::io::{VmIoMemView, VmIoOwner, VmReader, VmWriter};
7use crate::mm::vm_space::{UserPtConfig, VmSpace};
8use crate::mm::{Paddr, Vaddr};
9use crate::specs::mm::page_table::{Guards, Mapping, OwnerSubtree, PageTableOwner, PageTableView};
10use crate::specs::mm::tlb::TlbModel;
11use crate::specs::mm::virt_mem_newer::{FrameContents, MemView};
12
13verus! {
14
/// The tracked permissions required for reading/writing memories represented
/// by a [`VmReader`] or [`VmWriter`].
///
/// We also require a valid `vmspace_owner` to be present in this struct to
/// ensure that the reader/writer is not created out of thin air: the I/O
/// permission can only be exercised together with the owning VM space.
pub tracked struct VmIoPermission<'a> {
    /// The per-reader/writer I/O permission.
    pub vmio_owner: VmIoOwner<'a>,
    /// The owner of the VM space that granted `vmio_owner`.
    pub vmspace_owner: VmSpaceOwner<'a>,
}
23
/// A tracked struct for reasoning about verification-only properties of a [`VmSpace`].
///
/// This struct serves as a bookkeeper for all _active_ readers/writers within a specific
/// virtual memory space. It maintains a holistic view of the memory range covered by the
/// VM space it is tracking using a [`Ghost<MemView>`]. It also maintains a [`Tracked<MemView>`]
/// for the current memories it is holding permissions for, which is a subset of the total
/// memory range.
///
/// The management of each reader/writer and their corresponding memory views and permissions
/// must go through this struct's APIs for properly taking and returning permissions, which ensures
/// the consistency of the overall VM space so that, e.g., no memory aliasing will occur during
/// the lifetime of the readers/writers, and no reader/writer will be created out of thin air
/// without the permission from the VM space owner (or the verification rejects invalid requests).
///
/// # Lifecycle of a reader/writer
///
/// We briefly introduce how we manage the lifecycle of a reader/writer under the management of
/// [`VmSpaceOwner`] in this section. In the first place we require that whenever the reader or
/// writer is being used to read or write memory, a matching permission called [`VmIoOwner`] must
/// be present. Thus the key is to properly manage the creation and deletion of the [`VmIoOwner`].
///
/// 1. **Creation**: To create a new reader/writer, we first check if the new reader/writer can be
///    created under the current VM space owner using the APIs [`Self::can_create_reader`] and
///    [`Self::can_create_writer`]. Note that this creates an empty reader/writer that doesn't
///    hold any memory view yet.
/// 2. **Activation**: This is the step where we assign a memory view to the reader/writer. Via
///    [`Self::activate_reader`] and [`Self::activate_writer`], the permissions will be moved from
///    the VM space owner to the reader/writer. Note that readers do not need owned permissions so
///    they just borrow the memory view from the VM space owner, while writers need to take the
///    ownership of the memory view from the VM space owner. After this step, the reader/writer is
///    fully activated and can be used to read/write memory.
/// 3. **Disposal**: After the reader/writer finishes the reading/writing operation, we can dispose
///    it via [`Self::dispose_reader`] and [`Self::dispose_writer`]. This step is the reverse of
///    activation, where the permissions will be moved back from the reader/writer to the VM space
///    owner. After this step, the reader/writer is considered disposed but re-usable as long as it
///    is properly activated again before use.
/// 4. **Removal**: If we know for sure that the reader/writer will never be used again, we can
///    remove it from the active list via [`Self::remove_reader`] and [`Self::remove_writer`].
pub tracked struct VmSpaceOwner<'a> {
    /// The owner of the page table of this VM space.
    pub page_table_owner: OwnerSubtree<UserPtConfig>,
    /// Whether this VM space is currently active.
    pub active: bool,
    /// Active readers for this VM space.
    pub readers: Seq<VmIoOwner<'a>>,
    /// Active writers for this VM space.
    pub writers: Seq<VmIoOwner<'a>>,
    /// The "actual" (remaining) memory view of this VM space, where some
    /// of the mappings may have been transferred to the writers.
    pub mem_view: Option<MemView>,
    /// The holistic view of the memory range covered by this VM space owner.
    pub mv_range: Ghost<Option<MemView>>,
    /// Whether we allow shared reading (i.e., readers whose ranges may overlap).
    pub shared_reader: bool,
}
77
impl<'a> Inv for VmSpaceOwner<'a> {
    /// Defines the invariant for `VmSpaceOwner`.
    ///
    /// This specification ensures the consistency of the VM space, particularly
    /// regarding memory access permissions and overlapping ranges.
    ///
    /// # Invariants
    /// 1. **Recursion**: The underlying `page_table_owner` must satisfy its own invariant.
    /// 2. **Active State Consistency**: If the VM space is marked as `active`:
    ///    - The memory views must be well-formed per [`VmSpaceOwner::mem_view_wf`].
    ///    - If a remaining `mem_view` is present:
    ///      - **Element Validity**: Every stored reader/writer `VmIoOwner` must satisfy
    ///        its own invariant.
    ///      - **Memory Isolation (Read-Write)**: No reader memory range may overlap with
    ///        any writer memory range.
    ///      - **Memory Isolation (Write-Write)**: Distinct writers never overlap each other.
    ///      - **Conditional Read Isolation**: Unless `shared_reader` is set, distinct
    ///        readers never overlap each other.
    open spec fn inv(self) -> bool {
        &&& self.page_table_owner.inv()
        &&& self.active ==> {
            &&& self.mem_view_wf()
            &&& self.mem_view matches Some(mem_view) ==> {
                // Readers and writers are valid.
                &&& forall|i: int|
                    #![trigger self.readers[i]]
                    0 <= i < self.readers.len() as int ==> {
                        &&& self.readers[i].inv()
                    }
                &&& forall|i: int|
                    #![trigger self.writers[i]]
                    0 <= i < self.writers.len() as int ==> {
                        &&& self.writers[i].inv()
                    }
                // --- Memory Range Overlap Checks ---
                // Readers never overlap with writers.
                &&& forall|i, j: int|
                    #![trigger self.readers[i], self.writers[j]]
                    0 <= i < self.readers.len() as int && 0 <= j < self.writers.len() as int ==> {
                        let r = self.readers[i];
                        let w = self.writers[j];
                        r.disjoint(w)
                    }
                // Unless shared reading is enabled, distinct readers are disjoint.
                &&& !self.shared_reader ==> forall|i, j: int|
                    #![trigger self.readers[i], self.readers[j]]
                    0 <= i < self.readers.len() as int && 0 <= j < self.readers.len() as int && i
                        != j ==> {
                        let r1 = self.readers[i];
                        let r2 = self.readers[j];
                        r1.disjoint(r2)
                    }
                // Distinct writers are always disjoint.
                &&& forall|i, j: int|
                    #![trigger self.writers[i], self.writers[j]]
                    0 <= i < self.writers.len() as int && 0 <= j < self.writers.len() as int && i
                        != j ==> {
                        let w1 = self.writers[i];
                        let w2 = self.writers[j];
                        w1.disjoint(w2)
                    }
            }
        }
    }
}
139
140impl<'a> VmSpaceOwner<'a> {
    /// This specification function ensures that the `mem_view` (remaining view),
    /// `mv_range` (total view), and the views held by active readers and writers
    /// maintain a consistent global state.
    ///
    /// The key properties include:
    ///
    /// ### 1. Existence Invariants
    /// * `mem_view` is present if and only if `mv_range` is present.
    ///
    /// ### 2. Structural Integrity
    /// * Both the remaining view and the total view must have finite memory mappings.
    /// * Internal mappings within each view must be disjoint (no overlapping address ranges).
    ///
    /// ### 3. Global Consistency
    /// * **Subset Relation**: The remaining view's mappings and memory domain must be subsets of the total view.
    /// * **Translation Equality**: For any virtual address (VA), the address translation in the remaining view must match the translation in the total view.
    ///
    /// ### 4. Writer Invariants (Exclusive Ownership)
    /// * **Type Verification**: Every writer must hold a `WriteView`.
    /// * **Translation Consistency**: Writer translations must match the total view.
    /// * **Mutual Exclusion**: If a writer translates a VA, that VA must not exist in the remaining view's translation table or memory domain.
    /// * **Mapping Isolation**: Writer mappings must be disjoint from the remaining view's mappings and must be a subset of the total view's mappings.
    ///
    /// ### 5. Reader Invariants (Shared Consistency)
    /// * **Type Verification**: Every reader must hold a `ReadView`.
    /// * **Translation Consistency**: Reader translations must be consistent with the total view.
    pub open spec fn mem_view_wf(self) -> bool {
        &&& self.mem_view is Some
            <==> self.mv_range@ is Some
        // This requires that TotalMapping (mvv) = mv ∪ writer mappings ∪ reader mappings
        &&& self.mem_view matches Some(remaining_view) ==> self.mv_range@ matches Some(total_view)
            ==> {
            &&& remaining_view.mappings_are_disjoint()
            &&& remaining_view.mappings.finite()
            &&& total_view.mappings_are_disjoint()
            &&& total_view.mappings.finite()
            // ======================
            // Remaining Consistency
            // ======================
            &&& remaining_view.mappings.subset_of(total_view.mappings)
            &&& remaining_view.memory.dom().subset_of(
                total_view.memory.dom(),
            )
            // =====================
            // Total View Consistency
            // =====================
            &&& forall|va: usize|
                #![trigger remaining_view.addr_transl(va)]
                #![trigger total_view.addr_transl(va)]
                remaining_view.addr_transl(va) == total_view.addr_transl(
                    va,
                )
            // =====================
            // Writer correctness
            // =====================
            &&& forall|i: int|
                #![trigger self.writers[i]]
                0 <= i < self.writers.len() as int ==> {
                    let writer = self.writers[i];

                    &&& writer.mem_view matches Some(VmIoMemView::WriteView(writer_mv)) && {
                        &&& forall|va: usize|
                            #![trigger writer_mv.addr_transl(va)]
                            #![trigger total_view.addr_transl(va)]
                            #![trigger remaining_view.addr_transl(va)]
                            #![trigger remaining_view.memory.contains_key(va)]
                            {
                                // We do not enforce that the range must be the same as the
                                // memory view it holds as the writer may not consume all the
                                // memory in its range.
                                //
                                // So we cannot directly reason on `self.range` here; we need
                                // to instead ensure that the memory view it holds is consistent
                                // with the total view and remaining view.
                                &&& writer_mv.mappings.finite()
                                &&& writer_mv.addr_transl(va) == total_view.addr_transl(va)
                                &&& writer_mv.addr_transl(va) matches Some(_) ==> {
                                    &&& remaining_view.addr_transl(va) is None
                                    &&& !remaining_view.memory.contains_key(va)
                                }
                            }
                        &&& writer_mv.mappings.disjoint(remaining_view.mappings)
                        &&& writer_mv.mappings.subset_of(total_view.mappings)
                        &&& writer_mv.memory.dom().subset_of(total_view.memory.dom())
                    }
                }
            // =====================
            // Reader correctness
            // =====================
            &&& forall|i: int|
                #![trigger self.readers[i]]
                0 <= i < self.readers.len() as int ==> {
                    let reader = self.readers[i];

                    &&& reader.mem_view matches Some(VmIoMemView::ReadView(reader_mv)) && {
                        forall|va: usize|
                            #![trigger reader_mv.addr_transl(va)]
                            #![trigger total_view.addr_transl(va)]
                            {
                                // For readers there is no need to check remaining_view
                                // because it is borrowed from remaining_view directly.
                                &&& reader_mv.mappings.finite()
                                &&& reader_mv.addr_transl(va) == total_view.addr_transl(va)
                            }
                    }
                }
        }
    }
249
    /// Determines whether a new reader can be safely instantiated within the VM address space.
    ///
    /// This specification function enforces memory isolation by ensuring that the
    /// requested memory range does not intersect with the range of any active writer.
    ///
    /// Note that only writers are checked here: a new reader is allowed to overlap
    /// with existing readers at creation time.
    pub open spec fn can_create_reader(&self, vaddr: Vaddr, len: usize) -> bool
        recommends
            self.inv(),
    {
        &&& forall|i: int|
            #![trigger self.writers[i]]
            0 <= i < self.writers.len() ==> !self.writers[i].overlaps_with_range(
                vaddr..(vaddr + len) as usize,
            )
    }
264
    /// Checks if we can create a new writer under this VM space owner.
    ///
    /// Similar to [`Self::can_create_reader`], but checks the requested range
    /// against both active readers and active writers, since a writer requires
    /// exclusive access.
    pub open spec fn can_create_writer(&self, vaddr: Vaddr, len: usize) -> bool
        recommends
            self.inv(),
    {
        &&& forall|i: int|
            #![trigger self.readers[i]]
            0 <= i < self.readers.len() ==> !self.readers[i].overlaps_with_range(
                vaddr..(vaddr + len) as usize,
            )
        &&& forall|i: int|
            #![trigger self.writers[i]]
            0 <= i < self.writers.len() ==> !self.writers[i].overlaps_with_range(
                vaddr..(vaddr + len) as usize,
            )
    }
283
    /// Generates a new unique ID for VM IO owners.
    ///
    /// This assumes that we always generate a fresh ID that is not used by any existing
    /// readers or writers. This should be safe as the ID space is unbounded and the ID
    /// is only used to distinguish VM IO owners during verification.
    ///
    /// NOTE(review): this spec is `uninterp` (no body), so freshness is an assumption,
    /// not a proved property — confirm callers do not rely on it beyond ghost reasoning.
    pub uninterp spec fn new_vm_io_id(&self) -> nat
        recommends
            self.inv(),
    ;
293
    /// Activates the given reader to read data from the user space of the current task.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The [`VmSpace`] invariants must hold with respect to the [`VmSpaceOwner`], which must be active.
    /// - The reader must be well-formed with respect to the [`VmSpaceOwner`].
    /// - The reader's virtual address range must be mapped within the [`VmSpaceOwner`]'s memory view.
    /// ## Postconditions
    /// - The reader will be activated with a *borrowed* view of its virtual address range taken
    ///   from the [`VmSpaceOwner`]'s memory view (readers never take ownership).
    /// ## Safety
    /// - The function preserves all memory invariants.
    /// - The [`MemView`] invariants ensure that the reader has a consistent view of memory.
    /// - The [`VmSpaceOwner`] invariants ensure that the viewed memory is owned exclusively by this [`VmSpace`].
    #[inline(always)]
    #[verus_spec(r =>
        requires
            old(self).mem_view matches Some(mv) &&
            forall |va: usize|
                #![auto]
                old(owner_r).range@.start <= va < old(owner_r).range@.end ==>
                    mv.addr_transl(va) is Some
            ,
            old(self).inv(),
            old(self).active,
            old(owner_r).inv_with_reader(*reader),
            old(owner_r).mem_view is None,
            reader.inv(),
        ensures
            owner_r.inv_with_reader(*reader),
            owner_r.mem_view == Some(VmIoMemView::ReadView(&old(self).mem_view@.unwrap().borrow_at_spec(
                old(owner_r).range@.start,
                (old(owner_r).range@.end - old(owner_r).range@.start) as usize,
            ))),
    )]
    pub proof fn activate_reader(tracked &mut self, reader: &'a VmReader<'a>, owner_r: &'a mut VmIoOwner<'a>) {
        // The precondition guarantees the memory view is present; a missing
        // view is unreachable.
        let tracked mv = match self.mem_view {
            Some(ref mv) => mv,
            _ => { proof_from_false() },
        };
        // Borrow (not move) the sub-view covering the reader's VA range.
        let tracked borrowed_mv = mv.borrow_at(
            owner_r.range@.start,
            (owner_r.range@.end - owner_r.range@.start) as usize,
        );

        owner_r.mem_view = Some(VmIoMemView::ReadView(borrowed_mv));

        // Prove that every VA in the reader's range still translates in the
        // borrowed view: any mapping covering such a VA intersects the range,
        // so it survives the borrow filter.
        assert forall|va: usize|
            #![auto]
            owner_r.range@.start <= va < owner_r.range@.end implies borrowed_mv.addr_transl(
            va,
        ) is Some by {
            if owner_r.range@.start <= va && va < owner_r.range@.end {
                assert(borrowed_mv.mappings =~= mv.mappings.filter(
                    |m: Mapping|
                        m.va_range.start < (owner_r.range@.end) && m.va_range.end
                            > owner_r.range@.start,
                ));
                let o_borrow_mv = borrowed_mv.mappings.filter(
                    |m: Mapping| m.va_range.start <= va < m.va_range.end,
                );
                let o_mv = mv.mappings.filter(
                    |m: Mapping| m.va_range.start <= va < m.va_range.end,
                );
                assert(mv.addr_transl(va) is Some);
                assert(o_mv.len() > 0);
                assert(o_borrow_mv.len() > 0) by {
                    // Pick a witness mapping covering `va` from the original
                    // view; the same mapping is in the borrowed view.
                    let m = o_mv.choose();
                    assert(o_mv.contains(m)) by {
                        vstd::set::axiom_set_choose_len(o_mv);
                    }
                    assert(o_borrow_mv.contains(m));
                }
            }
        }

    }
370
    /// Activates the given writer to write data to the user space of the current task.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The [`VmSpace`] invariants must hold with respect to the [`VmSpaceOwner`], which must be active.
    /// - The writer must be well-formed with respect to the [`VmSpaceOwner`].
    /// - The writer's virtual address range must be mapped within the [`VmSpaceOwner`]'s memory view.
    /// ## Postconditions
    /// - The writer will be activated with an *owned* view of its virtual address range split off
    ///   from the [`VmSpaceOwner`]'s memory view (writers take ownership, unlike readers).
    /// ## Safety
    /// - The function preserves all memory invariants.
    /// - The [`MemView`] invariants ensure that the writer has a consistent view of memory.
    /// - The [`VmSpaceOwner`] invariants ensure that the viewed memory is owned exclusively by
    ///   this [`VmSpace`].
    #[inline(always)]
    #[verus_spec(r =>
        requires
            old(self).mem_view matches Some(mv) &&
            forall |va: usize|
                #![auto]
                old(owner_w).range@.start <= va < old(owner_w).range@.end ==>
                    mv.addr_transl(va) is Some
            ,
            old(self).inv(),
            old(self).active,
            old(owner_w).inv_with_writer(*writer),
            old(owner_w).mem_view is None,
            writer.inv(),
        ensures
            owner_w.inv_with_writer(*writer),
            owner_w.mem_view == Some(VmIoMemView::WriteView(old(self).mem_view@.unwrap().split_spec(
                old(owner_w).range@.start,
                (old(owner_w).range@.end - old(owner_w).range@.start) as usize,
            ).0)),
    )]
    pub proof fn activate_writer(tracked &mut self, writer: &'a VmWriter<'a>, owner_w: &'a mut VmIoOwner<'a>) {
        // Take the whole remaining view out, split off the writer's range
        // (`lhs`), and put the rest (`rhs`) back.
        let tracked mut mv = self.mem_view.tracked_take();
        let ghost old_mv = mv;
        let tracked (lhs, rhs) = mv.split(
            owner_w.range@.start,
            (owner_w.range@.end - owner_w.range@.start) as usize,
        );

        owner_w.mem_view = Some(VmIoMemView::WriteView(lhs));
        self.mem_view = Some(rhs);

        // Prove that every VA in the writer's range still translates in the
        // split-off view: any mapping covering such a VA intersects the range,
        // so it ends up on the `lhs` side of the split.
        assert forall|va: usize|
            #![auto]
            owner_w.range@.start <= va < owner_w.range@.end implies lhs.addr_transl(
            va,
        ) is Some by {
            if owner_w.range@.start <= va && va < owner_w.range@.end {
                assert(lhs.mappings =~= old_mv.mappings.filter(
                    |m: Mapping|
                        m.va_range.start < (owner_w.range@.end) && m.va_range.end
                            > owner_w.range@.start,
                ));
                let o_lhs = lhs.mappings.filter(
                    |m: Mapping| m.va_range.start <= va < m.va_range.end,
                );
                let o_mv = old_mv.mappings.filter(
                    |m: Mapping| m.va_range.start <= va < m.va_range.end,
                );

                assert(old_mv.addr_transl(va) is Some);
                assert(o_mv.len() > 0);
                assert(o_lhs.len() > 0) by {
                    broadcast use vstd::set::axiom_set_choose_len;

                    // Witness mapping covering `va` in the pre-split view.
                    let m = o_mv.choose();
                    assert(o_mv.contains(m));
                    assert(m.va_range.start <= va < m.va_range.end);
                    assert(o_lhs.contains(m));
                }
            }
        }

    }
449
    /// Removes the given reader from the active readers list.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The [`VmSpace`] invariants must hold with respect to the [`VmSpaceOwner`], which must be active.
    /// - The index `idx` must be a valid index into the active readers list.
    /// ## Postconditions
    /// - The reader at index `idx` will be removed from the active readers list.
    /// - The invariants of the [`VmSpaceOwner`] will still hold after the removal.
    pub proof fn remove_reader(tracked &mut self, idx: int)
        requires
            old(self).inv(),
            old(self).active,
            old(self).mem_view is Some,
            0 <= idx < old(self).readers.len() as int,
        ensures
            self.inv(),
            self.active == old(self).active,
            self.shared_reader == old(self).shared_reader,
            self.readers == old(self).readers.remove(idx),
    {
        // Readers only ever borrow their view, so dropping the removed owner
        // returns nothing to `mem_view`; removal from the list suffices.
        self.readers.tracked_remove(idx);
    }
473
474
    /// Removes the given writer from the active writers list.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - The [`VmSpace`] invariants must hold with respect to the [`VmSpaceOwner`], which must be active.
    /// - The index `idx` must be a valid index into the active writers list.
    /// ## Postconditions
    /// - The writer at index `idx` will be removed from the active writers list.
    /// - The memory view held by the removed writer will be returned to the VM space owner,
    ///   ensuring that the overall memory view remains consistent.
    /// - The invariants of the [`VmSpaceOwner`] will still hold after the removal.
    pub proof fn remove_writer(tracked &mut self, idx: usize)
        requires
            old(self).inv(),
            old(self).active,
            old(self).mem_view is Some,
            old(self).mv_range@ is Some,
            0 <= idx < old(self).writers.len() as int,
        ensures
            self.inv(),
            self.active == old(self).active,
            self.shared_reader == old(self).shared_reader,
            self.writers == old(self).writers.remove(idx as int),
    {
        let tracked writer = self.writers.tracked_remove(idx as int);

        // Now we need to "return" the memory view back to the vm space owner.
        // Writers always hold a `WriteView` by the invariant, so other cases
        // are unreachable.
        let tracked mv = match writer.mem_view {
            Some(VmIoMemView::WriteView(mv)) => mv,
            _ => { proof_from_false() },
        };

        // "Join" the memory view back into the remaining view.
        let tracked mut remaining = self.mem_view.tracked_take();
        let ghost old_remaining = remaining;
        remaining.join(mv);
        self.mem_view = Some(remaining);

        // Re-establish well-formedness of the joined view.
        assert(self.mem_view_wf()) by {
            let ghost total_view = self.mv_range@.unwrap();

            assert(remaining.mappings =~= old_remaining.mappings.union(mv.mappings));
            assert(remaining.memory =~= old_remaining.memory.union_prefer_right(mv.memory));
            assert(self.mv_range == old(self).mv_range);
            assert(self.mem_view == Some(remaining));

            // Translation in the joined view still agrees with the total view:
            // any mapping covering `va` in the joined view comes from either
            // the old remaining view or the returned writer view, and both are
            // subsets of the total view.
            assert forall|va: usize|
                #![auto]
                { remaining.addr_transl(va) == total_view.addr_transl(va) } by {
                let r_mappings = remaining.mappings.filter(
                    |m: Mapping| m.va_range.start <= va < m.va_range.end,
                );
                let t_mappings = total_view.mappings.filter(
                    |m: Mapping| m.va_range.start <= va < m.va_range.end,
                );
                let w_mappings = mv.mappings.filter(
                    |m: Mapping| m.va_range.start <= va < m.va_range.end,
                );

                assert(r_mappings.subset_of(t_mappings));
                assert(w_mappings.subset_of(t_mappings));

                if r_mappings.len() > 0 {
                    assert(t_mappings.len() > 0) by {
                        let r = r_mappings.choose();
                        assert(r_mappings.contains(r)) by {
                            vstd::set::axiom_set_choose_len(r_mappings);
                        }
                        assert(t_mappings.contains(r));
                    }
                }
            }

            // Every writer still in the list stays disjoint from the joined
            // remaining view: it was disjoint from the old remaining view, and
            // (by write-write isolation) from the removed writer's view too.
            assert forall|i: int|
                #![trigger self.writers[i]]
                0 <= i < self.writers.len() as int implies {
                let other_writer = self.writers[i];

                &&& other_writer.mem_view matches Some(VmIoMemView::WriteView(writer_mv))
                    && writer_mv.mappings.disjoint(remaining.mappings)
            } by {
                let other_writer = self.writers[i];

                assert(old(self).inv());
                let writer_mv = match other_writer.mem_view {
                    Some(VmIoMemView::WriteView(mv)) => mv,
                    _ => { proof_from_false() },
                };

                assert(mv.mappings.disjoint(writer_mv.mappings)) by {
                    assert(exists|i: int|
                        0 <= i < old(self).writers.len() as int ==> #[trigger] old(self).writers[i]
                            == other_writer);
                    assert(exists|i: int|
                        0 <= i < old(self).writers.len() as int ==> #[trigger] old(self).writers[i]
                            == writer);
                }
            }
        }
    }
575
    /// Disposes the given reader, releasing its ownership on the memory range.
    ///
    /// This does not mean that the owner is discarded; it indicates that someone
    /// who finishes the reading operation can let us reclaim the permission.
    /// The deletion of the reader is done via another API [`VmSpaceOwner::remove_reader`].
    ///
    /// Typically this API is called in two scenarios:
    ///
    /// 1. The reader has been created and we immediately move the ownership into us.
    /// 2. The reader has finished the reading and needs to return the ownership back.
    pub proof fn dispose_reader(tracked &mut self, tracked owner: VmIoOwner<'a>)
        requires
            owner.inv(),
            old(self).inv(),
            old(self).active,
            old(self).mv_range@ matches Some(total_view) && owner.mem_view matches Some(
                VmIoMemView::ReadView(mv),
            ) && old(self).mem_view matches Some(remaining) && mv.mappings.finite() && {
                forall|va: usize|
                    #![auto]
                    {
                        &&& total_view.addr_transl(va) == mv.addr_transl(va)
                        &&& mv.mappings.finite()
                    }
            },
            forall|i: int|
                #![trigger old(self).writers[i]]
                0 <= i < old(self).writers.len() ==> old(self).writers[i].disjoint(owner),
            forall|i: int|
                #![trigger old(self).readers[i]]
                0 <= i < old(self).readers.len() ==> old(self).readers[i].disjoint(owner),
        ensures
            self.inv(),
            self.active == old(self).active,
            self.shared_reader == old(self).shared_reader,
            owner.range@.start < owner.range@.end ==> self.readers == old(self).readers.push(owner),
    {
        // Witness that the owner indeed holds a `ReadView`; any other shape is
        // ruled out by the precondition. Readers only borrow, so the view is
        // simply dropped here rather than joined back.
        // NOTE(review): `mv` is otherwise unused — confirm this binding is
        // needed only as a match witness.
        let tracked mv = match owner.mem_view {
            Some(VmIoMemView::ReadView(mv)) => mv,
            _ => { proof_from_false() },
        };

        if owner.range@.start < owner.range@.end {
            // Return the memory view back to the vm space owner.
            // Empty-range owners are dropped instead of being tracked.
            self.readers.tracked_push(owner);
        }
    }
623
    /// Disposes the given writer, releasing its ownership on the memory range.
    ///
    /// This does not mean that the owner is discarded; it indicates that someone
    /// who finishes the writing operation can let us reclaim the permission.
    ///
    /// The deletion of the writer is through another API [`VmSpaceOwner::remove_writer`].
    pub proof fn dispose_writer(tracked &mut self, tracked owner: VmIoOwner<'a>)
        requires
            old(self).inv(),
            old(self).active,
            owner.inv(),
            old(self).mv_range@ matches Some(total_view) && owner.mem_view matches Some(
                VmIoMemView::WriteView(mv),
            ) && old(self).mem_view matches Some(remaining) && mv.mappings.finite() && {
                &&& forall|va: usize|
                    #![auto]
                    {
                        &&& mv.addr_transl(va) == total_view.addr_transl(va)
                        &&& mv.addr_transl(va) matches Some(_) ==> {
                            &&& remaining.addr_transl(va) is None
                            &&& !remaining.memory.contains_key(va)
                        }
                    }
                &&& mv.mappings.disjoint(remaining.mappings)
                &&& mv.mappings.subset_of(total_view.mappings)
                &&& mv.memory.dom().subset_of(total_view.memory.dom())
            },
            forall|i: int|
                #![trigger old(self).writers[i]]
                0 <= i < old(self).writers.len() as int ==> old(self).writers[i].disjoint(owner),
            forall|i: int|
                #![trigger old(self).readers[i]]
                0 <= i < old(self).readers.len() as int ==> old(self).readers[i].disjoint(owner),
        ensures
            self.inv(),
            self.active == old(self).active,
            self.shared_reader == old(self).shared_reader,
            owner.range@.start < owner.range@.end ==> self.writers == old(self).writers.push(owner),
    {
        // If the writer has consumed all the memory, nothing to do;
        // just discard the writer and return the permission back to
        // the vm space owner.
        match owner.mem_view {
            // NOTE(review): `writer_mv` is bound but unused — confirm the
            // `ref` binding is needed only to witness the `WriteView` shape.
            Some(VmIoMemView::WriteView(ref writer_mv)) => {
                if owner.range@.start < owner.range@.end {
                    // Non-empty range: keep tracking the owner in the list.
                    self.writers.tracked_push(owner);
                }
            },
            _ => {
                // Unreachable: the precondition requires a `WriteView`.
                assert(false);
            },
        }
    }
677}
678}