use alloc::vec::Vec;
use vstd::pervasive::{arbitrary, proof_from_false};
use vstd::prelude::*;

use crate::specs::mm::virt_mem_newer::{MemView, VirtPtr};

use crate::error::Error;
use crate::mm::frame::untyped::UFrame;
use crate::mm::io::VmIoMemView;
use crate::mm::page_table::*;
use crate::mm::page_table::{EntryOwner, PageTableFrag, PageTableGuard};
use crate::specs::arch::*;
use crate::specs::mm::frame::mapping::meta_to_frame;
use crate::specs::mm::frame::meta_region_owners::MetaRegionOwners;
use crate::specs::mm::page_table::cursor::owners::CursorOwner;
use crate::specs::mm::page_table::*;
use crate::specs::task::InAtomicMode;
use core::marker::PhantomData;
use core::{ops::Range, sync::atomic::Ordering};
use vstd_extra::ghost_tree::*;

use crate::{
    mm::{
        io::{VmIoOwner, VmReader, VmWriter},
        page_prop::PageProperty,
        Paddr, PagingConstsTrait, PagingLevel, Vaddr, MAX_USERSPACE_VADDR,
    },
    prelude::*,
};

use alloc::sync::Arc;

verus! {

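/// A virtual memory space for user-space memory, backed by a user-mode page
/// table together with the `VmReader`s and `VmWriter`s currently active on it.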
#[rustc_has_incoherent_inherent_impls]
pub struct VmSpace<'a> {
    pub pt: PageTable<UserPtConfig>,
    pub shared_reader: bool,
    pub readers: Vec<&'a VmReader<'a>>,
    pub writers: Vec<&'a VmWriter<'a>>,
    pub marker: PhantomData<&'a ()>,
}

impl Inv for VmSpace<'_> {
    open spec fn inv(self) -> bool {
        &&& forall|i: int|
            #![trigger self.readers@[i]]
            0 <= i < self.readers@.len() as int ==> self.readers@[i].inv()
        &&& forall|i: int|
            #![trigger self.writers@[i]]
            0 <= i < self.writers@.len() as int ==> self.writers@[i].inv()
    }
}

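/// The tracked permissions needed to perform VM I/O on a `VmSpace`: the
/// I/O-side owner paired with the owner of the space itself.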
pub tracked struct VmIoPermission<'a> {
    pub vmio_owner: VmIoOwner<'a>,
    pub vmspace_owner: VmSpaceOwner<'a>,
}

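/// The ghost owner of a `VmSpace`: ownership of the page-table subtree,
/// whether the space is activated, the tracked owners of all outstanding
/// readers and writers, and the memory view that remains after the active
/// writers have split off their exclusive sub-views (`mv_range` records the
/// total view the remaining view was carved from).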
pub tracked struct VmSpaceOwner<'a> {
    pub page_table_owner: OwnerSubtree<UserPtConfig>,
    pub active: bool,
    pub readers: Seq<VmIoOwner<'a>>,
    pub writers: Seq<VmIoOwner<'a>>,
    pub mem_view: Option<MemView>,
    pub mv_range: Ghost<Option<MemView>>,
    pub shared_reader: bool,
}

impl<'a> Inv for VmSpaceOwner<'a> {
    open spec fn inv(self) -> bool {
        &&& self.page_table_owner.inv()
        &&& self.active ==> {
            &&& self.mem_view_wf()
            &&& self.mem_view matches Some(mem_view) ==> {
                &&& forall|i: int|
                    #![trigger self.readers[i]]
                    0 <= i < self.readers.len() as int ==> {
                        &&& self.readers[i].inv()
                    }
                &&& forall|i: int|
                    #![trigger self.writers[i]]
                    0 <= i < self.writers.len() as int ==> {
                        &&& self.writers[i].inv()
                    }
                &&& forall|i, j: int|
                    #![trigger self.readers[i], self.writers[j]]
                    0 <= i < self.readers.len() as int && 0 <= j < self.writers.len() as int ==> {
                        let r = self.readers[i];
                        let w = self.writers[j];
                        r.disjoint(w)
                    }
                &&& !self.shared_reader ==> forall|i, j: int|
                    #![trigger self.readers[i], self.readers[j]]
                    0 <= i < self.readers.len() as int && 0 <= j < self.readers.len() as int
                        && i != j ==> {
                        let r1 = self.readers[i];
                        let r2 = self.readers[j];
                        r1.disjoint(r2)
                    }
                &&& forall|i, j: int|
                    #![trigger self.writers[i], self.writers[j]]
                    0 <= i < self.writers.len() as int && 0 <= j < self.writers.len() as int
                        && i != j ==> {
                        let w1 = self.writers[i];
                        let w2 = self.writers[j];
                        w1.disjoint(w2)
                    }
            }
        }
    }
}

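/// Well-formedness of the owner's memory views: the remaining view is a
/// sub-view of the total view and agrees with it on address translation;
/// each active writer holds a write view that translates like the total view
/// but is disjoint from the remaining view, and each active reader holds a
/// read view that translates like the total view.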
impl<'a> VmSpaceOwner<'a> {
    pub open spec fn mem_view_wf(self) -> bool {
        &&& self.mem_view is Some <==> self.mv_range@ is Some
        &&& self.mem_view matches Some(remaining_view) ==> self.mv_range@ matches Some(total_view)
            ==> {
            &&& remaining_view.mappings_are_disjoint()
            &&& remaining_view.mappings.finite()
            &&& total_view.mappings_are_disjoint()
            &&& total_view.mappings.finite()
            &&& remaining_view.mappings.subset_of(total_view.mappings)
            &&& remaining_view.memory.dom().subset_of(total_view.memory.dom())
            &&& forall|va: usize|
                #![trigger remaining_view.addr_transl(va)]
                #![trigger total_view.addr_transl(va)]
                remaining_view.addr_transl(va) == total_view.addr_transl(va)
            &&& forall|i: int|
                #![trigger self.writers[i]]
                0 <= i < self.writers.len() as int ==> {
                    let writer = self.writers[i];

                    &&& writer.mem_view matches Some(VmIoMemView::WriteView(writer_mv)) && {
                        &&& forall|va: usize|
                            #![trigger writer_mv.addr_transl(va)]
                            #![trigger total_view.addr_transl(va)]
                            #![trigger remaining_view.addr_transl(va)]
                            #![trigger remaining_view.memory.contains_key(va)]
                            {
                                &&& writer_mv.mappings.finite()
                                &&& writer_mv.addr_transl(va) == total_view.addr_transl(va)
                                &&& writer_mv.addr_transl(va) matches Some(_) ==> {
                                    &&& remaining_view.addr_transl(va) is None
                                    &&& !remaining_view.memory.contains_key(va)
                                }
                            }
                        &&& writer_mv.mappings.disjoint(remaining_view.mappings)
                        &&& writer_mv.mappings.subset_of(total_view.mappings)
                        &&& writer_mv.memory.dom().subset_of(total_view.memory.dom())
                    }
                }
            &&& forall|i: int|
                #![trigger self.readers[i]]
                0 <= i < self.readers.len() as int ==> {
                    let reader = self.readers[i];

                    &&& reader.mem_view matches Some(VmIoMemView::ReadView(reader_mv)) && {
                        forall|va: usize|
                            #![trigger reader_mv.addr_transl(va)]
                            #![trigger total_view.addr_transl(va)]
                            {
                                &&& reader_mv.mappings.finite()
                                &&& reader_mv.addr_transl(va) == total_view.addr_transl(va)
                            }
                    }
                }
        }
    }

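    /// Relates this ghost owner to the concrete `VmSpace`: the tracked reader
    /// and writer sequences mirror the space's reader and writer vectors
    /// element-wise.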
    pub open spec fn inv_with(&self, vm_space: VmSpace<'a>) -> bool {
        &&& self.shared_reader == vm_space.shared_reader
        &&& self.readers.len() == vm_space.readers@.len()
        &&& self.writers.len() == vm_space.writers@.len()
        &&& forall|i: int|
            #![trigger self.readers[i]]
            #![trigger vm_space.readers@[i]]
            0 <= i < vm_space.readers@.len() as int ==> {
                &&& self.readers[i].inv_with_reader(*vm_space.readers@[i])
            }
        &&& forall|i: int|
            #![trigger self.writers[i]]
            #![trigger vm_space.writers@[i]]
            0 <= i < vm_space.writers@.len() as int ==> {
                &&& self.writers[i].inv_with_writer(*vm_space.writers@[i])
            }
    }

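    /// A reader over `vaddr..vaddr + len` may be created iff the range does
    /// not overlap any outstanding writer.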
    pub open spec fn can_create_reader(&self, vaddr: Vaddr, len: usize) -> bool
        recommends
            self.inv(),
    {
        &&& forall|i: int|
            #![trigger self.writers[i]]
            0 <= i < self.writers.len() ==> !self.writers[i].overlaps_with_range(
                vaddr..(vaddr + len) as usize,
            )
    }

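    /// A writer may be created iff the range overlaps neither an outstanding
    /// reader nor an outstanding writer.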
    pub open spec fn can_create_writer(&self, vaddr: Vaddr, len: usize) -> bool
        recommends
            self.inv(),
    {
        &&& forall|i: int|
            #![trigger self.readers[i]]
            0 <= i < self.readers.len() ==> !self.readers[i].overlaps_with_range(
                vaddr..(vaddr + len) as usize,
            )
        &&& forall|i: int|
            #![trigger self.writers[i]]
            0 <= i < self.writers.len() ==> !self.writers[i].overlaps_with_range(
                vaddr..(vaddr + len) as usize,
            )
    }

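    /// Draws a fresh identifier for a new VM I/O owner (axiomatized).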
    #[verifier::external_body]
    #[verus_spec(r =>
        requires
            self.inv(),
    )]
    pub proof fn new_vm_io_id(&self) -> nat {
        unimplemented!()
    }

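    /// Removes the tracked reader owner at index `idx`.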
    pub proof fn remove_reader(tracked &mut self, idx: int)
        requires
            old(self).inv(),
            old(self).active,
            old(self).mem_view is Some,
            0 <= idx < old(self).readers.len() as int,
        ensures
            self.inv(),
            self.active == old(self).active,
            self.shared_reader == old(self).shared_reader,
            self.readers == old(self).readers.remove(idx),
    {
        self.readers.tracked_remove(idx);
    }

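    /// Absorbs a read-view owner into the tracked reader sequence; owners
    /// with an empty range are dropped instead.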
    pub proof fn dispose_reader(tracked &mut self, tracked owner: VmIoOwner<'a>)
        requires
            owner.inv(),
            old(self).inv(),
            old(self).active,
            old(self).mv_range@ matches Some(total_view) && owner.mem_view matches Some(
                VmIoMemView::ReadView(mv),
            ) && old(self).mem_view matches Some(remaining) && mv.mappings.finite() && {
                forall|va: usize|
                    #![auto]
                    {
                        &&& total_view.addr_transl(va) == mv.addr_transl(va)
                        &&& mv.mappings.finite()
                    }
            },
            forall|i: int|
                #![trigger old(self).writers[i]]
                0 <= i < old(self).writers.len() ==> old(self).writers[i].disjoint(owner),
            forall|i: int|
                #![trigger old(self).readers[i]]
                0 <= i < old(self).readers.len() ==> old(self).readers[i].disjoint(owner),
        ensures
            self.inv(),
            self.active == old(self).active,
            self.shared_reader == old(self).shared_reader,
            owner.range@.start < owner.range@.end ==> self.readers == old(self).readers.push(owner),
    {
        let tracked mv = match owner.mem_view {
            Some(VmIoMemView::ReadView(mv)) => mv,
            _ => { proof_from_false() },
        };

        if owner.range@.start < owner.range@.end {
            self.readers.tracked_push(owner);
        }
    }

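    /// Absorbs a write-view owner into the tracked writer sequence. The
    /// owner's view must translate like the total view, be disjoint from the
    /// remaining view, and be disjoint from every outstanding reader and
    /// writer.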
    pub proof fn dispose_writer(tracked &mut self, tracked owner: VmIoOwner<'a>)
        requires
            old(self).inv(),
            old(self).active,
            owner.inv(),
            old(self).mv_range@ matches Some(total_view) && owner.mem_view matches Some(
                VmIoMemView::WriteView(mv),
            ) && old(self).mem_view matches Some(remaining) && mv.mappings.finite() && {
                &&& forall|va: usize|
                    #![auto]
                    {
                        &&& mv.addr_transl(va) == total_view.addr_transl(va)
                        &&& mv.addr_transl(va) matches Some(_) ==> {
                            &&& remaining.addr_transl(va) is None
                            &&& !remaining.memory.contains_key(va)
                        }
                    }
                &&& mv.mappings.disjoint(remaining.mappings)
                &&& mv.mappings.subset_of(total_view.mappings)
                &&& mv.memory.dom().subset_of(total_view.memory.dom())
            },
            forall|i: int|
                #![trigger old(self).writers[i]]
                0 <= i < old(self).writers.len() as int ==> old(self).writers[i].disjoint(owner),
            forall|i: int|
                #![trigger old(self).readers[i]]
                0 <= i < old(self).readers.len() as int ==> old(self).readers[i].disjoint(owner),
        ensures
            self.inv(),
            self.active == old(self).active,
            self.shared_reader == old(self).shared_reader,
            owner.range@.start < owner.range@.end ==> self.writers == old(self).writers.push(owner),
    {
        match owner.mem_view {
            Some(VmIoMemView::WriteView(ref writer_mv)) => {
                if owner.range@.start < owner.range@.end {
                    self.writers.tracked_push(owner);
                }
            },
            _ => {
                assert(false);
            },
        }
    }

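    /// Removes the writer at `idx` and joins its exclusive write view back
    /// into the remaining view; the proof below re-establishes `mem_view_wf`
    /// for the merged view.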
    pub proof fn remove_writer(tracked &mut self, idx: usize)
        requires
            old(self).inv(),
            old(self).active,
            old(self).mem_view is Some,
            old(self).mv_range@ is Some,
            0 <= idx < old(self).writers.len() as int,
        ensures
            self.inv(),
            self.active == old(self).active,
            self.shared_reader == old(self).shared_reader,
            self.writers == old(self).writers.remove(idx as int),
    {
        let tracked writer = self.writers.tracked_remove(idx as int);

        let tracked mv = match writer.mem_view {
            Some(VmIoMemView::WriteView(mv)) => mv,
            _ => { proof_from_false() },
        };

        let tracked mut remaining = self.mem_view.tracked_take();
        let ghost old_remaining = remaining;
        remaining.join(mv);
        self.mem_view = Some(remaining);

        assert(self.mem_view_wf()) by {
            let ghost total_view = self.mv_range@.unwrap();

            assert(remaining.mappings =~= old_remaining.mappings.union(mv.mappings));
            assert(remaining.memory =~= old_remaining.memory.union_prefer_right(mv.memory));
            assert(self.mv_range == old(self).mv_range);
            assert(self.mem_view == Some(remaining));

            assert forall|va: usize|
                #![auto]
                { remaining.addr_transl(va) == total_view.addr_transl(va) } by {
                let r_mappings = remaining.mappings.filter(
                    |m: Mapping| m.va_range.start <= va < m.va_range.end,
                );
                let t_mappings = total_view.mappings.filter(
                    |m: Mapping| m.va_range.start <= va < m.va_range.end,
                );
                let w_mappings = mv.mappings.filter(
                    |m: Mapping| m.va_range.start <= va < m.va_range.end,
                );

                assert(r_mappings.subset_of(t_mappings));
                assert(w_mappings.subset_of(t_mappings));

                if r_mappings.len() > 0 {
                    assert(t_mappings.len() > 0) by {
                        let r = r_mappings.choose();
                        assert(r_mappings.contains(r)) by {
                            vstd::set::axiom_set_choose_len(r_mappings);
                        }
                        assert(t_mappings.contains(r));
                    }
                }
            }

            assert forall|i: int|
                #![trigger self.writers[i]]
                0 <= i < self.writers.len() as int implies {
                let other_writer = self.writers[i];

                &&& other_writer.mem_view matches Some(VmIoMemView::WriteView(writer_mv))
                    && writer_mv.mappings.disjoint(remaining.mappings)
            } by {
                let other_writer = self.writers[i];

                assert(old(self).inv());
                let writer_mv = match other_writer.mem_view {
                    Some(VmIoMemView::WriteView(mv)) => mv,
                    _ => { proof_from_false() },
                };

                assert(mv.mappings.disjoint(writer_mv.mappings)) by {
                    assert(exists|i: int|
                        0 <= i < old(self).writers.len() as int ==> #[trigger] old(self).writers[i]
                            == other_writer);
                    assert(exists|i: int|
                        0 <= i < old(self).writers.len() as int ==> #[trigger] old(self).writers[i]
                            == writer);
                }
            }
        }
    }
}

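/// Page-table configuration for user address spaces: only the lower half of
/// the top-level table (indices 0..256) is used, and top-level entries may be
/// unmapped.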
#[derive(Clone, Debug)]
pub struct UserPtConfig {}

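/// The item stored in a user page table: an untyped frame together with its
/// mapping properties.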
#[derive(Clone)]
pub struct MappedItem {
    pub frame: UFrame,
    pub prop: PageProperty,
}

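/// Instantiates the page-table interface for `UserPtConfig`; the conversions
/// between `MappedItem` and raw `(Paddr, PagingLevel, PageProperty)` triples
/// are left axiomatized.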
unsafe impl PageTableConfig for UserPtConfig {
    open spec fn TOP_LEVEL_INDEX_RANGE_spec() -> Range<usize> {
        0..256
    }

    open spec fn TOP_LEVEL_CAN_UNMAP_spec() -> (b: bool) {
        true
    }

    fn TOP_LEVEL_INDEX_RANGE() -> Range<usize> {
        0..256
    }

    fn TOP_LEVEL_CAN_UNMAP() -> (b: bool) {
        true
    }

    type E = PageTableEntry;

    type C = PagingConsts;

    type Item = MappedItem;

    uninterp spec fn item_into_raw_spec(item: Self::Item) -> (Paddr, PagingLevel, PageProperty);

    #[verifier::external_body]
    fn item_into_raw(item: Self::Item) -> (Paddr, PagingLevel, PageProperty) {
        unimplemented!()
    }

    uninterp spec fn item_from_raw_spec(
        paddr: Paddr,
        level: PagingLevel,
        prop: PageProperty,
    ) -> Self::Item;

    #[verifier::external_body]
    fn item_from_raw(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self::Item {
        unimplemented!()
    }
}

type Result<A> = core::result::Result<A, Error>;

#[verus_verify]
impl<'a> VmSpace<'a> {
    pub uninterp spec fn new_spec() -> Self;

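    /// Preconditions for creating a reader over `vaddr..vaddr + len`: the
    /// space and its owner are consistent and active, the range is a valid
    /// nonempty user-space range that does not overlap any outstanding
    /// writer, and this space's page table is the one currently installed.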
    pub open spec fn reader_requires(
        &self,
        vm_owner: VmSpaceOwner<'a>,
        vaddr: Vaddr,
        len: usize,
    ) -> bool {
        &&& self.inv()
        &&& vm_owner.inv_with(*self)
        &&& vm_owner.inv()
        &&& vm_owner.active
        &&& vm_owner.can_create_reader(vaddr, len)
        &&& vaddr != 0 && len > 0 && vaddr + len <= MAX_USERSPACE_VADDR_SPEC()
        &&& current_page_table_paddr_spec() == self.pt.root_paddr_spec()
    }

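    /// Preconditions for creating a writer: as for readers, but the range
    /// must additionally be disjoint from all outstanding readers.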
    pub open spec fn writer_requires(
        &self,
        vm_owner: VmSpaceOwner<'a>,
        vaddr: Vaddr,
        len: usize,
    ) -> bool {
        &&& self.inv()
        &&& vm_owner.inv_with(*self)
        &&& vm_owner.inv()
        &&& vm_owner.active
        &&& vm_owner.can_create_writer(vaddr, len)
        &&& vaddr != 0 && len > 0 && vaddr + len <= MAX_USERSPACE_VADDR_SPEC()
        &&& current_page_table_paddr_spec() == self.pt.root_paddr_spec()
    }

    pub open spec fn reader_ensures(
        &self,
        vm_owner_old: VmSpaceOwner<'_>,
        vm_owner_new: VmSpaceOwner<'_>,
        vaddr: Vaddr,
        len: usize,
        r: Result<VmReader<'_>>,
        r_owner: Option<VmIoOwner<'_>>,
    ) -> bool {
        &&& vm_owner_new.inv()
        &&& vm_owner_new.readers == vm_owner_old.readers
        &&& vm_owner_new.writers == vm_owner_old.writers
        &&& r matches Ok(reader) && r_owner matches Some(owner) ==> {
            &&& reader.inv()
            &&& owner.inv_with_reader(reader)
            &&& owner.mem_view is None
        }
    }

    pub open spec fn writer_ensures(
        &self,
        vm_owner_old: VmSpaceOwner<'a>,
        vm_owner_new: VmSpaceOwner<'a>,
        vaddr: Vaddr,
        len: usize,
        r: Result<VmWriter<'a>>,
        r_owner: Option<VmIoOwner<'a>>,
    ) -> bool {
        &&& vm_owner_new.inv()
        &&& vm_owner_new.readers == vm_owner_old.readers
        &&& vm_owner_new.writers == vm_owner_old.writers
        &&& r matches Ok(writer) && r_owner matches Some(owner) ==> {
            &&& writer.inv()
            &&& owner.inv_with_writer(writer)
            &&& owner.mem_view is None
        }
    }

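    /// Creates a new, empty `VmSpace` (axiomatized; modeled by `new_spec`).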
    #[inline]
    #[verifier::external_body]
    #[verifier::when_used_as_spec(new_spec)]
    #[verus_spec(r =>
        ensures
            r == Self::new_spec(),
            r.inv(),
    )]
    pub fn new() -> Self {
        unimplemented!()
    }

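    /// Creates a read-only cursor over the virtual address range `va`,
    /// requiring an atomic-mode guard.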
    #[verifier::external_body]
    pub fn cursor<G: InAtomicMode>(&'a self, guard: &'a G, va: &Range<Vaddr>) -> Result<
        Cursor<'a, G>,
    > {
        Ok(self.pt.cursor(guard, va).map(|pt_cursor| Cursor(pt_cursor.0))?)
    }

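    /// Creates a mutable cursor over `va` that can map, unmap, and protect
    /// pages in the range.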
    pub fn cursor_mut<G: InAtomicMode>(&'a self, guard: &'a G, va: &Range<Vaddr>) -> Result<
        CursorMut<'a, G>,
    > {
        Ok(self.pt.cursor_mut(guard, va).map(|pt_cursor| CursorMut { pt_cursor: pt_cursor.0 })?)
    }

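    /// Registers `reader` as active on this space. In the ghost state, the
    /// reader's owner borrows a read-only sub-view of the remaining memory
    /// view over the reader's range; the proof shows every address in the
    /// range still translates in the borrowed view.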
    #[inline(always)]
    #[verus_spec(r =>
        with
            Tracked(vm_space_owner): Tracked<&'a mut VmSpaceOwner<'a>>,
            Tracked(owner_r): Tracked<&'a mut VmIoOwner<'a>>,
        requires
            old(self).inv(),
            old(vm_space_owner).mem_view matches Some(mv) &&
                forall |va: usize|
                    #![auto]
                    old(owner_r).range@.start <= va < old(owner_r).range@.end ==>
                        mv.addr_transl(va) is Some,
            old(vm_space_owner).inv_with(*old(self)),
            old(vm_space_owner).inv(),
            old(vm_space_owner).active,
            old(owner_r).inv_with_reader(*reader),
            old(owner_r).mem_view is None,
            reader.inv(),
        ensures
            self.inv(),
            self.shared_reader == old(self).shared_reader,
            self.readers@ == old(self).readers@.push(reader),
            owner_r.inv_with_reader(*reader),
            owner_r.mem_view == Some(VmIoMemView::ReadView(&old(vm_space_owner).mem_view@.unwrap().borrow_at_spec(
                old(owner_r).range@.start,
                (old(owner_r).range@.end - old(owner_r).range@.start) as usize,
            ))),
    )]
    pub fn activate_reader(&mut self, reader: &'a VmReader<'a>) {
        self.readers.push(reader);

        proof {
            let tracked mv = match vm_space_owner.mem_view {
                Some(ref mv) => mv,
                _ => { proof_from_false() },
            };
            let tracked borrowed_mv = mv.borrow_at(
                owner_r.range@.start,
                (owner_r.range@.end - owner_r.range@.start) as usize,
            );

            owner_r.mem_view = Some(VmIoMemView::ReadView(borrowed_mv));

            assert forall|va: usize|
                #![auto]
                owner_r.range@.start <= va < owner_r.range@.end implies borrowed_mv.addr_transl(
                va,
            ) is Some by {
                if owner_r.range@.start <= va && va < owner_r.range@.end {
                    assert(borrowed_mv.mappings =~= mv.mappings.filter(
                        |m: Mapping|
                            m.va_range.start < (owner_r.range@.end) && m.va_range.end
                                > owner_r.range@.start,
                    ));
                    let o_borrow_mv = borrowed_mv.mappings.filter(
                        |m: Mapping| m.va_range.start <= va < m.va_range.end,
                    );
                    let o_mv = mv.mappings.filter(
                        |m: Mapping| m.va_range.start <= va < m.va_range.end,
                    );
                    assert(mv.addr_transl(va) is Some);
                    assert(o_mv.len() > 0);
                    assert(o_borrow_mv.len() > 0) by {
                        let m = o_mv.choose();
                        assert(o_mv.contains(m)) by {
                            vstd::set::axiom_set_choose_len(o_mv);
                        }
                        assert(o_borrow_mv.contains(m));
                    }
                }
            }
        }
    }

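    /// Registers `writer` as active. Unlike a reader, the writer's owner
    /// splits an exclusive sub-view out of the remaining view, so the
    /// remaining view no longer translates addresses in the writer's range.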
    #[inline(always)]
    #[verus_spec(r =>
        with
            Tracked(vm_space_owner): Tracked<&'a mut VmSpaceOwner<'a>>,
            Tracked(owner_w): Tracked<&'a mut VmIoOwner<'a>>,
        requires
            old(self).inv(),
            old(vm_space_owner).mem_view matches Some(mv) &&
                forall |va: usize|
                    #![auto]
                    old(owner_w).range@.start <= va < old(owner_w).range@.end ==>
                        mv.addr_transl(va) is Some,
            old(vm_space_owner).inv_with(*old(self)),
            old(vm_space_owner).inv(),
            old(vm_space_owner).active,
            old(owner_w).inv_with_writer(*writer),
            old(owner_w).mem_view is None,
            writer.inv(),
        ensures
            self.inv(),
            self.shared_reader == old(self).shared_reader,
            self.writers@ == old(self).writers@.push(writer),
            owner_w.inv_with_writer(*writer),
            owner_w.mem_view == Some(VmIoMemView::WriteView(old(vm_space_owner).mem_view@.unwrap().split_spec(
                old(owner_w).range@.start,
                (old(owner_w).range@.end - old(owner_w).range@.start) as usize,
            ).0)),
    )]
    pub fn activate_writer(&mut self, writer: &'a VmWriter<'a>) {
        self.writers.push(writer);

        proof {
            let tracked mut mv = vm_space_owner.mem_view.tracked_take();
            let ghost old_mv = mv;
            let tracked (lhs, rhs) = mv.split(
                owner_w.range@.start,
                (owner_w.range@.end - owner_w.range@.start) as usize,
            );

            owner_w.mem_view = Some(VmIoMemView::WriteView(lhs));
            vm_space_owner.mem_view = Some(rhs);

            assert forall|va: usize|
                #![auto]
                owner_w.range@.start <= va < owner_w.range@.end implies lhs.addr_transl(va) is Some by {
                if owner_w.range@.start <= va && va < owner_w.range@.end {
                    assert(lhs.mappings =~= old_mv.mappings.filter(
                        |m: Mapping|
                            m.va_range.start < (owner_w.range@.end) && m.va_range.end
                                > owner_w.range@.start,
                    ));
                    let o_lhs = lhs.mappings.filter(
                        |m: Mapping| m.va_range.start <= va < m.va_range.end,
                    );
                    let o_mv = old_mv.mappings.filter(
                        |m: Mapping| m.va_range.start <= va < m.va_range.end,
                    );

                    assert(old_mv.addr_transl(va) is Some);
                    assert(o_mv.len() > 0);
                    assert(o_lhs.len() > 0) by {
                        broadcast use vstd::set::axiom_set_choose_len;

                        let m = o_mv.choose();
                        assert(o_mv.contains(m));
                        assert(m.va_range.start <= va < m.va_range.end);
                        assert(o_lhs.contains(m));
                    }
                }
            }
        }
    }

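    /// Creates a `VmReader` over `vaddr..vaddr + len` in user space and
    /// returns the tracked owner for the new reader alongside it.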
    #[inline]
    #[verus_spec(r =>
        with
            Tracked(owner): Tracked<&'a mut VmSpaceOwner<'a>>,
            -> vm_reader_owner: Tracked<Option<VmIoOwner<'a>>>,
        requires
            self.reader_requires(*old(owner), vaddr, len),
        ensures
            self.reader_ensures(*old(owner), *owner, vaddr, len, r, vm_reader_owner@),
    )]
    pub fn create_reader(&self, vaddr: Vaddr, len: usize) -> Result<VmReader<'a>> {
        let vptr = VirtPtr::from_vaddr(vaddr, len);
        let ghost id = owner.new_vm_io_id();
        proof_decl! {
            let tracked mut vm_reader_owner;
        }
        let reader = unsafe {
            proof_with!(Ghost(id) => Tracked(vm_reader_owner));
            VmReader::from_user_space(vptr)
        };

        proof_with!(|= Tracked(Some(vm_reader_owner)));
        Ok(reader)
    }

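    /// Creates a `VmWriter` over `vaddr..vaddr + len` in user space and
    /// returns the tracked owner for the new writer alongside it
    /// (axiomatized).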
    #[inline]
    #[verifier::external_body]
    #[verus_spec(r =>
        with
            Tracked(owner): Tracked<&mut VmSpaceOwner<'a>>,
            -> new_owner: Tracked<Option<VmIoOwner<'a>>>,
        requires
            self.writer_requires(*old(owner), vaddr, len),
        ensures
            self.writer_ensures(*old(owner), *owner, vaddr, len, r, new_owner@),
    )]
    pub fn create_writer(self, vaddr: Vaddr, len: usize) -> Result<VmWriter<'a>> {
        let vptr = VirtPtr::from_vaddr(vaddr, len);
        let ghost id = owner.new_vm_io_id();
        proof_decl! {
            let tracked mut vm_writer_owner;
        }

        let writer = unsafe {
            proof_with!(Ghost(id) => Tracked(vm_writer_owner));
            VmWriter::from_user_space(vptr)
        };

        proof_with!(|= Tracked(Some(vm_writer_owner)));
        Ok(writer)
    }
}

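/// A read-only cursor over a `VmSpace`, wrapping the page-table cursor.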
pub struct Cursor<'a, A: InAtomicMode>(pub crate::mm::page_table::Cursor<'a, UserPtConfig, A>);

#[verus_verify]
impl<'rcu, A: InAtomicMode> Cursor<'rcu, A> {
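    /// The well-formedness conditions threaded through every cursor
    /// operation: the cursor's own invariant and its relation to the ghost
    /// owner, validity of the metadata regions, and the locking discipline
    /// relating the owner to the held guards.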
    pub open spec fn invariants(
        self,
        owner: CursorOwner<'rcu, UserPtConfig>,
        regions: MetaRegionOwners,
        guards: Guards<'rcu, UserPtConfig>,
    ) -> bool {
        &&& self.0.inv()
        &&& self.0.wf(owner)
        &&& owner.inv()
        &&& regions.inv()
        &&& owner.children_not_locked(guards)
        &&& owner.nodes_locked(guards)
        &&& owner.relate_region(regions)
        &&& !owner.popped_too_high
    }

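    /// `query` succeeds whenever the cursor sits inside its barrier range; on
    /// success, the returned range and item reflect the cursor's model.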
    pub open spec fn query_success_requires(self) -> bool {
        self.0.barrier_va.start <= self.0.va < self.0.barrier_va.end
    }

    pub open spec fn query_success_ensures(
        self,
        view: CursorView<UserPtConfig>,
        range: Range<Vaddr>,
        item: Option<MappedItem>,
    ) -> bool {
        if view.present() {
            let found_item = view.query_item_spec();
            &&& range.start == found_item.va_range.start
            &&& range.end == found_item.va_range.end
            &&& item is Some
            &&& meta_to_frame(item.unwrap().frame.ptr.addr()) == found_item.pa_range.start
        } else {
            &&& range.start == self.0.va
            &&& item is None
        }
    }

    #[verus_spec(r =>
        with Tracked(owner): Tracked<&mut CursorOwner<'rcu, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'rcu, UserPtConfig>>
        requires
            old(self).invariants(*old(owner), *old(regions), *old(guards))
        ensures
            self.invariants(*owner, *regions, *guards),
            self.query_success_requires() ==> {
                &&& r is Ok
                &&& self.query_success_ensures(self.0.model(*owner), r.unwrap().0, r.unwrap().1)
            }
    )]
    pub fn query(&mut self) -> Result<(Range<Vaddr>, Option<MappedItem>)> {
        proof { admit() };
        Ok(
            #[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
            self.0.query()?,
        )
    }

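    /// Moves the cursor forward within `len` bytes; see the page-table
    /// cursor's `find_next` for the underlying semantics.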
    #[verus_spec(r =>
        with Tracked(owner): Tracked<&mut CursorOwner<'rcu, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'rcu, UserPtConfig>>
    )]
    pub fn find_next(&mut self, len: usize) -> (res: Option<Vaddr>)
        requires
            old(owner).inv(),
            old(self).0.wf(*old(owner)),
            old(regions).inv(),
            old(self).0.level < old(self).0.guard_level,
            old(self).0.inv(),
            old(owner).in_locked_range(),
            old(owner).children_not_locked(*old(guards)),
            old(owner).nodes_locked(*old(guards)),
            old(owner).relate_region(*old(regions)),
            !old(owner).popped_too_high,
            len % PAGE_SIZE() == 0,
            old(self).0.va + len <= old(self).0.barrier_va.end,
        ensures
            owner.inv(),
            self.0.wf(*owner),
            regions.inv(),
    {
        #[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
        self.0.find_next(len)
    }

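    /// Jumps the cursor to virtual address `va` within the locked range.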
    #[verus_spec(
        with Tracked(owner): Tracked<&mut CursorOwner<'rcu, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'rcu, UserPtConfig>>
    )]
    pub fn jump(&mut self, va: Vaddr) -> Result<()>
        requires
            old(owner).inv(),
            old(self).0.wf(*old(owner)),
            old(self).0.level < old(self).0.guard_level,
            old(self).0.inv(),
            old(owner).in_locked_range(),
            old(owner).children_not_locked(*old(guards)),
            old(owner).nodes_locked(*old(guards)),
            old(owner).relate_region(*old(regions)),
    {
        (#[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
        self.0.jump(va))?;
        Ok(())
    }

    pub fn virt_addr(&self) -> Vaddr {
        self.0.virt_addr()
    }
}

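/// A mutable cursor over a `VmSpace` that can map, unmap, and protect ranges,
/// wrapping the page-table `CursorMut`.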
pub struct CursorMut<'a, A: InAtomicMode> {
    pub pt_cursor: crate::mm::page_table::CursorMut<'a, UserPtConfig, A>,
}

impl<'a, A: InAtomicMode> CursorMut<'a, A> {
    pub open spec fn query_requires(
        cursor: Self,
        owner: CursorOwner<'a, UserPtConfig>,
        guard_perm: vstd::simple_pptr::PointsTo<PageTableGuard<'a, UserPtConfig>>,
        regions: MetaRegionOwners,
    ) -> bool {
        &&& cursor.pt_cursor.inner.wf(owner)
        &&& owner.inv()
        &&& regions.inv()
    }

    pub open spec fn query_ensures(
        old_cursor: Self,
        new_cursor: Self,
        owner: CursorOwner<'a, UserPtConfig>,
        guard_perm: vstd::simple_pptr::PointsTo<PageTableGuard<'a, UserPtConfig>>,
        old_regions: MetaRegionOwners,
        new_regions: MetaRegionOwners,
        r: Result<(Range<Vaddr>, Option<MappedItem>)>,
    ) -> bool {
        &&& new_regions.inv()
        &&& new_cursor.pt_cursor.inner.wf(owner)
    }

    #[verus_spec(
        with Tracked(owner): Tracked<&mut CursorOwner<'a, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'a, UserPtConfig>>
        requires
            old(owner).inv(),
            old(self).pt_cursor.inner.wf(*old(owner)),
            old(regions).inv(),
            old(self).pt_cursor.inner.inv(),
            old(owner).children_not_locked(*old(guards)),
            old(owner).nodes_locked(*old(guards)),
            old(owner).relate_region(*old(regions)),
            old(owner).in_locked_range(),
            !old(owner).popped_too_high,
    )]
    pub fn query(&mut self) -> Result<(Range<Vaddr>, Option<MappedItem>)> {
        Ok(
            #[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
            self.pt_cursor.query()?,
        )
    }

    #[verus_spec(res =>
        with Tracked(owner): Tracked<&mut CursorOwner<'a, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'a, UserPtConfig>>
    )]
    pub fn find_next(&mut self, len: usize) -> (res: Option<Vaddr>)
        requires
            old(owner).inv(),
            old(self).pt_cursor.inner.wf(*old(owner)),
            old(regions).inv(),
            old(self).pt_cursor.inner.level < old(self).pt_cursor.inner.guard_level,
            old(self).pt_cursor.inner.inv(),
            old(owner).in_locked_range(),
            old(owner).children_not_locked(*old(guards)),
            old(owner).nodes_locked(*old(guards)),
            old(owner).relate_region(*old(regions)),
            !old(owner).popped_too_high,
            len % PAGE_SIZE() == 0,
            old(self).pt_cursor.inner.va + len <= old(self).pt_cursor.inner.barrier_va.end,
        ensures
            owner.inv(),
            self.pt_cursor.inner.wf(*owner),
            regions.inv(),
    {
        #[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
        self.pt_cursor.find_next(len)
    }

    #[verus_spec(r =>
        with
            Tracked(owner): Tracked<&mut CursorOwner<'a, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'a, UserPtConfig>>
        requires
            old(owner).inv(),
            old(self).pt_cursor.inner.wf(*old(owner)),
            old(self).pt_cursor.inner.level < old(self).pt_cursor.inner.guard_level,
            old(self).pt_cursor.inner.inv(),
    )]
    pub fn jump(&mut self, va: Vaddr) -> Result<()> {
        (#[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
        self.pt_cursor.jump(va))?;
        Ok(())
    }

    #[verus_spec(r =>
        returns
            self.pt_cursor.inner.va,
    )]
    pub fn virt_addr(&self) -> Vaddr {
        self.pt_cursor.virt_addr()
    }

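    /// Cursor-side invariants and preconditions for `map`, split between the
    /// general well-formedness below and the position requirements that
    /// follow.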
    pub open spec fn map_cursor_inv(
        self,
        cursor_owner: CursorOwner<'a, UserPtConfig>,
        guards: Guards<'a, UserPtConfig>,
        regions: MetaRegionOwners,
    ) -> bool {
        &&& cursor_owner.inv()
        &&& self.pt_cursor.inner.wf(cursor_owner)
        &&& self.pt_cursor.inner.inv()
        &&& cursor_owner.children_not_locked(guards)
        &&& cursor_owner.nodes_locked(guards)
        &&& cursor_owner.relate_region(regions)
        &&& !cursor_owner.popped_too_high
        &&& regions.inv()
    }

    pub open spec fn map_cursor_requires(self, cursor_owner: CursorOwner<'a, UserPtConfig>) -> bool {
        &&& cursor_owner.in_locked_range()
        &&& self.pt_cursor.inner.level < self.pt_cursor.inner.guard_level
        &&& self.pt_cursor.inner.va < self.pt_cursor.inner.barrier_va.end
    }

    pub open spec fn map_item_requires(
        self,
        frame: UFrame,
        prop: PageProperty,
        entry_owner: EntryOwner<UserPtConfig>,
    ) -> bool {
        let item = MappedItem { frame: frame, prop: prop };
        let (paddr, level, prop0) = UserPtConfig::item_into_raw_spec(item);
        &&& prop == prop0
        &&& entry_owner.frame.unwrap().mapped_pa == paddr
        &&& entry_owner.frame.unwrap().prop == prop
        &&& level <= UserPtConfig::HIGHEST_TRANSLATION_LEVEL()
        &&& 1 <= level <= NR_LEVELS()
        &&& Child::Frame(paddr, level, prop0).wf(entry_owner)
        &&& self.pt_cursor.inner.va + page_size(level) <= self.pt_cursor.inner.barrier_va.end
        &&& entry_owner.inv()
        &&& self.pt_cursor.inner.va % page_size(level) == 0
    }

    pub open spec fn map_item_ensures(
        self,
        frame: UFrame,
        prop: PageProperty,
        old_cursor_view: CursorView<UserPtConfig>,
        cursor_view: CursorView<UserPtConfig>,
    ) -> bool {
        let item = MappedItem { frame: frame, prop: prop };
        let (paddr, level, prop0) = UserPtConfig::item_into_raw_spec(item);
        cursor_view == old_cursor_view.map_spec(paddr, page_size(level), prop)
    }

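    /// Maps `frame` with `prop` at the cursor's current virtual address; a
    /// fragment returned by the underlying page-table `map` is currently
    /// discarded.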
    #[verus_spec(
        with Tracked(cursor_owner): Tracked<&mut CursorOwner<'a, UserPtConfig>>,
            Tracked(entry_owner): Tracked<EntryOwner<UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'a, UserPtConfig>>,
    )]
    pub fn map(&mut self, frame: UFrame, prop: PageProperty)
        requires
            old(self).map_cursor_requires(*old(cursor_owner)),
            old(self).map_cursor_inv(*old(cursor_owner), *old(guards), *old(regions)),
            old(self).map_item_requires(frame, prop, entry_owner),
        ensures
            self.map_cursor_inv(*cursor_owner, *guards, *regions),
            old(self).map_item_ensures(frame, prop, old(self).pt_cursor.inner.model(*old(cursor_owner)), self.pt_cursor.inner.model(*cursor_owner)),
    {
        let start_va = self.virt_addr();
        let item = MappedItem { frame: frame, prop: prop };

        let Err(frag) = (
            #[verus_spec(with Tracked(cursor_owner), Tracked(entry_owner), Tracked(regions), Tracked(guards))]
            self.pt_cursor.map(item)) else {
            return;
        };
    }

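    /// Unmaps `len` bytes starting at the cursor's current address and
    /// returns the number of unmapped frames; a stray page table contributes
    /// its whole frame count.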
    #[verus_spec(r =>
        requires
            len % PAGE_SIZE() == 0,
            old(self).pt_cursor.inner.va % PAGE_SIZE() == 0,
            old(self).pt_cursor.inner.va + len <= KERNEL_VADDR_RANGE().end as int,
        ensures
    )]
    #[verifier::external_body]
    pub fn unmap(&mut self, len: usize) -> usize {
        let end_va = self.virt_addr() + len;
        let mut num_unmapped: usize = 0;

        proof {
            assert((self.pt_cursor.inner.va + len) % PAGE_SIZE() as int == 0) by (compute);
        }

        #[verus_spec(
            invariant_except_break
                self.pt_cursor.inner.va % PAGE_SIZE() == 0,
                end_va % PAGE_SIZE() == 0,
        )]
        loop {
            let Some(frag) = (unsafe { self.pt_cursor.take_next(end_va - self.virt_addr()) }) else {
                break;
            };

            match frag {
                PageTableFrag::Mapped { va, item, .. } => {
                    let frame = item.frame;
                    num_unmapped += 1;
                },
                PageTableFrag::StrayPageTable { pt, va, len, num_frames } => {
                    num_unmapped += num_frames;
                },
            }
        }

        num_unmapped
    }

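    /// Applies `op` to the page properties of the next mapped slot within
    /// `len` bytes, returning the affected virtual address range if any.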
    #[verus_spec(r =>
        with Tracked(owner): Tracked<&mut CursorOwner<'a, UserPtConfig>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'a, UserPtConfig>>,
        requires
            old(regions).inv(),
            old(owner).inv(),
            !old(owner).cur_entry_owner().is_absent(),
            old(self).pt_cursor.inner.wf(*old(owner)),
            old(self).pt_cursor.inner.inv(),
            old(owner).in_locked_range(),
            !old(owner).popped_too_high,
            old(owner).children_not_locked(*old(guards)),
            old(owner).nodes_locked(*old(guards)),
            old(owner).relate_region(*old(regions)),
            len % PAGE_SIZE() == 0,
            old(self).pt_cursor.inner.level < NR_LEVELS(),
            old(self).pt_cursor.inner.va + len <= old(self).pt_cursor.inner.barrier_va.end,
        ensures
    )]
    pub fn protect_next(
        &mut self,
        len: usize,
        op: impl FnOnce(PageProperty) -> PageProperty,
    ) -> Option<Range<Vaddr>> {
        unsafe {
            #[verus_spec(with Tracked(owner), Tracked(regions), Tracked(guards))]
            self.pt_cursor.protect_next(len, op)
        }
    }
}

}