1use vstd::arithmetic::power2::*;
3use vstd::prelude::*;
4use vstd::simple_pptr;
5use vstd::atomic::PermissionU64;
6use vstd::std_specs::clone::*;
7
8use vstd_extra::prelude::{lemma_usize_ilog2_ordered, lemma_usize_pow2_ilog2};
9
10use core::{
11 fmt::Debug,
12 intrinsics::transmute_unchecked,
13 ops::{Range, RangeInclusive},
14 sync::atomic::{AtomicUsize, Ordering},
15};
16
17use crate::mm::frame::meta::MetaSlot;
18
19use super::{
20 lemma_nr_subpage_per_huge_bounded,
21 kspace::KernelPtConfig,
22 nr_subpage_per_huge,
23 page_prop::PageProperty,
24 Paddr,
25 vm_space::UserPtConfig,
26 PagingConstsTrait,
27 PagingLevel,
28 Vaddr,
29};
30
31use crate::specs::mm::page_table::*;
32
33use crate::specs::arch::mm::*;
34use crate::specs::arch::paging_consts::PagingConsts;
35use crate::specs::mm::page_table::cursor::*;
36use crate::specs::task::InAtomicMode;
37
38use crate::mm::frame::meta::mapping::frame_to_index;
39use crate::specs::mm::frame::meta_region_owners::MetaRegionOwners;
40use crate::specs::mm::frame::meta_owners::MetaPerm;
41use crate::mm::kspace::kvirt_area::disable_preempt;
42use crate::specs::arch::PageTableEntry;
43use vstd_extra::ownership::Inv;
44
45mod node;
46pub use node::*;
47mod cursor;
48
49pub(crate) use cursor::*;
50
51#[cfg(ktest)]
52mod test;
53
54verus! {
57
/// Errors returned by page-table operations on caller-supplied virtual
/// address ranges.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum PageTableError {
    /// The provided virtual address range (start, end) is invalid.
    InvalidVaddrRange(Vaddr, Vaddr),
    /// The provided virtual address is invalid.
    InvalidVaddr(Vaddr),
    /// The provided virtual address is not page-aligned.
    UnalignedVaddr,
}
67
/// A clone operation that threads a tracked [`MetaRegionOwners`] permission
/// through the call, so that cloning can update ghost reference-count state.
///
/// Implementors define `clone_requires`/`clone_ensures` as the abstract
/// pre/post-conditions on the region owners; `clone` is the executable
/// operation constrained by them.
pub trait RCClone: Sized {
    /// Precondition on the region owners for `clone` to be callable.
    spec fn clone_requires(self, perm: MetaRegionOwners) -> bool;

    /// Postcondition relating the region owners before/after the clone.
    spec fn clone_ensures(self, old_perm: MetaRegionOwners, new_perm: MetaRegionOwners, res: Self) -> bool;

    /// Clone `self`, mutating the tracked region-owner state.
    ///
    /// Guarantees: the result equals `self`; the implementor-defined
    /// `clone_ensures` holds; the region invariant is preserved; the `slots`
    /// map and the domain of `slot_owners` are unchanged.
    fn clone(&self, Tracked(perm): Tracked<&mut MetaRegionOwners>) -> (res: Self)
        requires
            self.clone_requires(*old(perm)),
        ensures
            res == *self,
            self.clone_ensures(*old(perm), *final(perm), res),
            final(perm).inv(),
            final(perm).slots =~= old(perm).slots,
            final(perm).slot_owners.dom() =~= old(perm).slot_owners.dom(),
    ;
}
84
/// Configuration of a page table: the entry type, paging constants, and the
/// item type stored in leaf entries, together with the proof obligations
/// tying them to each other.
///
/// # Safety
/// NOTE(review): this trait is `unsafe`; implementors are trusted to uphold
/// the raw item round-trip contract (`item_into_raw`/`item_from_raw`) —
/// confirm the exact obligation against the implementations.
pub unsafe trait PageTableConfig: Clone + Debug + Send + Sync + 'static {
    /// The range of top-level PTE indices this table manages.
    spec fn TOP_LEVEL_INDEX_RANGE_spec() -> Range<usize>;

    /// Leading (upper) address bits for this table; defaults to 0.
    open spec fn LEADING_BITS_spec() -> usize {
        0
    }

    /// Executable accessor for [`Self::TOP_LEVEL_INDEX_RANGE_spec`].
    fn TOP_LEVEL_INDEX_RANGE() -> (r: Range<usize>)
        ensures
            r == Self::TOP_LEVEL_INDEX_RANGE_spec(),
    ;

    /// Whether top-level entries may be unmapped; defaults to true.
    open spec fn TOP_LEVEL_CAN_UNMAP_spec() -> bool {
        true
    }

    /// Executable accessor for [`Self::TOP_LEVEL_CAN_UNMAP_spec`].
    fn TOP_LEVEL_CAN_UNMAP() -> (b: bool)
        ensures
            b == Self::TOP_LEVEL_CAN_UNMAP_spec(),
    ;

    /// The page-table entry type.
    type E: PageTableEntryTrait;

    /// The paging constants (page size, levels, etc.).
    type C: PagingConstsTrait;

    /// Axiom: a node holds exactly `NR_ENTRIES` entries
    /// (base page size divided by PTE size).
    proof fn axiom_nr_subpage_per_huge_eq_nr_entries()
        ensures
            Self::C::BASE_PAGE_SIZE() / Self::C::PTE_SIZE() == NR_ENTRIES,
    ;

    /// The item stored in leaf entries; must support ref-counted cloning.
    type Item: RCClone;

    /// Spec: decompose an item into (physical address, level, property).
    spec fn item_into_raw_spec(item: Self::Item) -> (Paddr, PagingLevel, PageProperty);

    /// Decompose an item into raw parts. The result's address is page-aligned,
    /// aligned to the page size of its level, and in-bounds of `MAX_PADDR`.
    #[verifier::when_used_as_spec(item_into_raw_spec)]
    fn item_into_raw(item: Self::Item) -> (res: (Paddr, PagingLevel, PageProperty))
        ensures
            1 <= res.1 <= NR_LEVELS,
            res == Self::item_into_raw_spec(item),
            res.0 % crate::specs::arch::mm::PAGE_SIZE == 0,
            res.0 < crate::specs::arch::mm::MAX_PADDR,
            res.0 % crate::mm::page_table::cursor::page_size(res.1) == 0,
            res.0 + crate::mm::page_table::cursor::page_size(res.1) <= crate::specs::arch::mm::MAX_PADDR,
    ;

    /// Spec: reconstruct an item from raw parts.
    spec fn item_from_raw_spec(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self::Item;

    /// Reconstruct an item from raw parts (inverse of `item_into_raw`,
    /// see [`Self::item_roundtrip`]).
    #[verifier::when_used_as_spec(item_from_raw_spec)]
    fn item_from_raw(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self::Item
        returns
            Self::item_from_raw_spec(paddr, level, prop),
    ;

    /// Proof obligation: unpack `Item::clone_ensures` into concrete facts
    /// about the region owners. Cloning bumps the ref count of the slot at
    /// `frame_to_index(pa)` by exactly one and leaves every other slot —
    /// and all other fields of that slot — unchanged.
    proof fn clone_ensures_concrete(
        item: Self::Item,
        pa: Paddr,
        old_regions: MetaRegionOwners,
        new_regions: MetaRegionOwners,
        res: Self::Item,
    )
        requires
            item.clone_ensures(old_regions, new_regions, res),
            Self::item_into_raw_spec(item).0 == pa,
            res == item,
            new_regions.inv(),
            new_regions.slots =~= old_regions.slots,
            new_regions.slot_owners.dom() =~= old_regions.slot_owners.dom(),
        ensures
            forall|i: usize| i != frame_to_index(pa) ==>
                (#[trigger] new_regions.slot_owners[i] == old_regions.slot_owners[i]),
            new_regions.slot_owners[frame_to_index(pa)].inner_perms.ref_count.value()
                == old_regions.slot_owners[frame_to_index(pa)].inner_perms.ref_count.value() + 1,
            new_regions.slot_owners[frame_to_index(pa)].inner_perms.ref_count.id()
                == old_regions.slot_owners[frame_to_index(pa)].inner_perms.ref_count.id(),
            new_regions.slot_owners[frame_to_index(pa)].inner_perms.storage
                == old_regions.slot_owners[frame_to_index(pa)].inner_perms.storage,
            new_regions.slot_owners[frame_to_index(pa)].inner_perms.vtable_ptr
                == old_regions.slot_owners[frame_to_index(pa)].inner_perms.vtable_ptr,
            new_regions.slot_owners[frame_to_index(pa)].inner_perms.in_list
                == old_regions.slot_owners[frame_to_index(pa)].inner_perms.in_list,
            new_regions.slot_owners[frame_to_index(pa)].paths_in_pt
                == old_regions.slot_owners[frame_to_index(pa)].paths_in_pt,
            new_regions.slot_owners[frame_to_index(pa)].self_addr
                == old_regions.slot_owners[frame_to_index(pa)].self_addr,
            new_regions.slot_owners[frame_to_index(pa)].raw_count
                == old_regions.slot_owners[frame_to_index(pa)].raw_count,
            new_regions.slot_owners[frame_to_index(pa)].usage
                == old_regions.slot_owners[frame_to_index(pa)].usage,
    ;

    /// Proof obligation: `item_into_raw` and `item_from_raw` are inverses.
    proof fn item_roundtrip(item: Self::Item, paddr: Paddr, level: PagingLevel, prop: PageProperty)
        ensures
            Self::item_into_raw_spec(item) == (paddr, level, prop) <==> Self::item_from_raw_spec(
                paddr,
                level,
                prop,
            ) == item,
    ;

    /// Proof obligation: establish `Item::clone_requires` from concrete facts
    /// about the region owners (slot present, ref count positive and with
    /// headroom below `REF_COUNT_MAX`).
    proof fn clone_requires_concrete(
        item: Self::Item,
        pa: Paddr,
        level: PagingLevel,
        prop: PageProperty,
        regions: MetaRegionOwners,
    )
        requires
            regions.inv(),
            Self::item_from_raw_spec(pa, level, prop) == item,
            crate::mm::frame::meta::has_safe_slot(pa),
            regions.slots.contains_key(frame_to_index(pa)),
            regions.slot_owners.contains_key(frame_to_index(pa)),
            regions.slot_owners[frame_to_index(pa)].inner_perms.ref_count.value() > 0,
            regions.slot_owners[frame_to_index(pa)].inner_perms.ref_count.value() + 1
                < crate::specs::mm::frame::meta_owners::REF_COUNT_MAX,
        ensures
            item.clone_requires(regions),
    ;
}
305
/// Every `PageTableConfig` is itself a `PagingConstsTrait` by delegating all
/// constants (and their lemmas) to its associated `C::C` paging constants.
impl<C: PageTableConfig> PagingConstsTrait for C {
    open spec fn BASE_PAGE_SIZE_spec() -> usize {
        C::C::BASE_PAGE_SIZE_spec()
    }

    fn BASE_PAGE_SIZE() -> usize {
        C::C::BASE_PAGE_SIZE()
    }

    open spec fn NR_LEVELS_spec() -> PagingLevel {
        C::C::NR_LEVELS_spec()
    }

    fn NR_LEVELS() -> PagingLevel {
        C::C::NR_LEVELS()
    }

    open spec fn HIGHEST_TRANSLATION_LEVEL_spec() -> PagingLevel {
        C::C::HIGHEST_TRANSLATION_LEVEL_spec()
    }

    fn HIGHEST_TRANSLATION_LEVEL() -> PagingLevel {
        C::C::HIGHEST_TRANSLATION_LEVEL()
    }

    open spec fn PTE_SIZE_spec() -> usize {
        C::C::PTE_SIZE_spec()
    }

    fn PTE_SIZE() -> usize {
        C::C::PTE_SIZE()
    }

    open spec fn ADDRESS_WIDTH_spec() -> usize {
        C::C::ADDRESS_WIDTH_spec()
    }

    fn ADDRESS_WIDTH() -> usize {
        C::C::ADDRESS_WIDTH()
    }

    open spec fn VA_SIGN_EXT_spec() -> bool {
        C::C::VA_SIGN_EXT_spec()
    }

    fn VA_SIGN_EXT() -> bool {
        C::C::VA_SIGN_EXT()
    }

    // The lemmas also just delegate to the underlying constants' proofs.
    proof fn lemma_BASE_PAGE_SIZE_properties()
        ensures
            0 < Self::BASE_PAGE_SIZE_spec(),
            is_pow2(Self::BASE_PAGE_SIZE_spec() as int),
    {
        C::C::lemma_BASE_PAGE_SIZE_properties();
    }

    proof fn lemma_PTE_SIZE_properties()
        ensures
            0 < Self::PTE_SIZE_spec() <= Self::BASE_PAGE_SIZE(),
            is_pow2(Self::PTE_SIZE_spec() as int),
    {
        C::C::lemma_PTE_SIZE_properties();
    }
}
373
374pub trait PageTableEntryTrait: Clone + Copy + Debug + Sized + Send + Sync + 'static {
378 spec fn default_spec() -> Self;
379
380 #[verifier::when_used_as_spec(default_spec)]
382 fn default() -> (res: Self)
383 ensures
384 res == Self::default_spec(),
385 ;
386
387 spec fn new_absent_spec() -> Self;
391
392 #[verifier(when_used_as_spec(new_absent_spec))]
393 fn new_absent() -> (res: Self)
394 ensures
395 res == Self::new_absent_spec(),
396 res.paddr() % PAGE_SIZE == 0,
397 res.paddr() < MAX_PADDR,
398 ;
399
400 spec fn is_present_spec(&self) -> bool;
402
403 #[verifier::when_used_as_spec(is_present_spec)]
404 fn is_present(&self) -> (res: bool)
405 ensures
406 res == self.is_present_spec(),
407 ;
408
409 spec fn new_page_spec(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self;
411
412 #[verifier::when_used_as_spec(new_page_spec)]
413 fn new_page(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> (res: Self)
414 requires
415 paddr % PAGE_SIZE == 0,
416 paddr < MAX_PADDR,
417 ensures
418 res == Self::new_page_spec(paddr, level, prop),
419 res.paddr() == paddr,
420 res.paddr() % PAGE_SIZE == 0,
421 res.paddr() < MAX_PADDR,
422 ;
423
424 spec fn new_pt_spec(paddr: Paddr) -> Self;
426
427 #[verifier::when_used_as_spec(new_pt_spec)]
428 fn new_pt(paddr: Paddr) -> (res: Self)
429 requires
430 paddr % PAGE_SIZE == 0,
431 paddr < MAX_PADDR,
432 ensures
433 res == Self::new_pt_spec(paddr),
434 res.paddr() == paddr,
435 res.paddr() % PAGE_SIZE == 0,
436 res.paddr() < MAX_PADDR,
437 ;
438
439 proof fn new_properties()
440 ensures
441 !Self::new_absent_spec().is_present(),
442 forall|level: PagingLevel|
443 #![trigger Self::new_absent_spec().is_last(level)]
444 1 < level ==> !Self::new_absent_spec().is_last(level),
445 forall|paddr: Paddr, level: PagingLevel, prop: PageProperty|
446 #![trigger Self::new_page_spec(paddr, level, prop)]
447 {
448 &&& Self::new_page_spec(paddr, level, prop).is_present()
449 &&& Self::new_page_spec(paddr, level, prop).paddr() == paddr
450 &&& Self::new_page_spec(paddr, level, prop).prop() == prop
451 &&& Self::new_page_spec(paddr, level, prop).is_last(level)
452 },
453 forall|paddr: Paddr|
454 #![trigger Self::new_pt_spec(paddr)]
455 {
456 &&& Self::new_pt_spec(paddr).is_present()
457 &&& Self::new_pt_spec(paddr).paddr() == paddr
458 &&& forall|level: PagingLevel| !Self::new_pt_spec(paddr).is_last(level)
459 },
460 ;
461
462 spec fn paddr_spec(&self) -> Paddr;
467
468 #[verifier::when_used_as_spec(paddr_spec)]
469 fn paddr(&self) -> (res: Paddr)
470 ensures
471 res == self.paddr_spec(),
472 ;
473
474 spec fn prop_spec(&self) -> PageProperty;
475
476 #[verifier::when_used_as_spec(prop_spec)]
477 fn prop(&self) -> (res: PageProperty)
478 ensures
479 res == self.prop_spec(),
480 ;
481
482 spec fn set_prop_spec(&self, prop: PageProperty) -> Self;
487
488 fn set_prop(&mut self, prop: PageProperty)
489 ensures
490 old(self).set_prop_spec(prop) == *final(self),
491 ;
492
493 proof fn set_prop_properties(self, prop: PageProperty)
494 ensures
495 self.set_prop_spec(prop).prop() == prop,
496 self.set_prop_spec(prop).paddr() == self.paddr(),
497 self.is_present() ==> self.set_prop_spec(prop).is_present(),
498 forall|level: PagingLevel|
499 #![trigger self.is_last(level)]
500 self.is_last(level) ==> self.set_prop_spec(prop).is_last(level),
501 ;
502
503 spec fn is_last_spec(&self, level: PagingLevel) -> bool;
508
509 #[verifier::when_used_as_spec(is_last_spec)]
510 fn is_last(&self, level: PagingLevel) -> (b: bool)
511 ensures
512 b == self.is_last_spec(level),
513 ;
514
515 spec fn as_usize_spec(self) -> usize;
517
518 #[verifier::external_body]
519 #[verifier::when_used_as_spec(as_usize_spec)]
520 fn as_usize(self) -> (res: usize)
521 ensures
522 res == self.as_usize_spec(),
523 {
524 unimplemented!()
525 }
529
530 #[verifier::external_body]
532 fn from_usize(pte_raw: usize) -> Self {
533 unimplemented!()
534 }
538}
539
/// A page table, owning its root page-table node.
pub struct PageTable<C: PageTableConfig> {
    // The root node; its address is the table's root physical frame.
    pub root: PageTableNode<C>,
}
545
/// Spec: the number of virtual-address bits consumed by one PTE index,
/// i.e. log2 of the number of entries per page-table node.
#[verifier::inline]
pub open spec fn nr_pte_index_bits_spec<C: PagingConstsTrait>() -> usize {
    nr_subpage_per_huge::<C>().ilog2() as usize
}
550
/// Executable counterpart of [`nr_pte_index_bits_spec`].
#[inline(always)]
#[verifier::when_used_as_spec(nr_pte_index_bits_spec)]
pub fn nr_pte_index_bits<C: PagingConstsTrait>() -> (res: usize)
    ensures
        res == nr_pte_index_bits_spec::<C>(),
{
    proof {
        // Establishes nr_subpage_per_huge >= 1 so ilog2 is well-defined.
        lemma_nr_subpage_per_huge_bounded::<C>();
    }
    nr_subpage_per_huge::<C>().ilog2() as usize
}
563
/// The per-level index width is non-negative and at most
/// `log2(BASE_PAGE_SIZE)`, by monotonicity of `ilog2` on
/// `1 <= nr_subpage_per_huge <= BASE_PAGE_SIZE`.
pub proof fn lemma_nr_pte_index_bits_bounded<C: PagingConstsTrait>()
    ensures
        0 <= nr_pte_index_bits::<C>() <= C::BASE_PAGE_SIZE().ilog2(),
{
    lemma_nr_subpage_per_huge_bounded::<C>();
    let nr = nr_subpage_per_huge::<C>();
    assert(1 <= nr <= C::BASE_PAGE_SIZE());
    let bits = nr.ilog2();
    // Lower bound: ilog2 is monotone, so ilog2(1) <= ilog2(nr).
    assert(0 <= bits) by {
        lemma_usize_ilog2_ordered(1, nr);
    }
    // Upper bound: ilog2(nr) <= ilog2(BASE_PAGE_SIZE).
    assert(bits <= C::BASE_PAGE_SIZE().ilog2()) by {
        lemma_usize_ilog2_ordered(nr, C::BASE_PAGE_SIZE());
    }
}
579
580#[verifier::external_body]
605pub fn largest_pages<C: PageTableConfig>(
606 mut va: Vaddr,
607 mut pa: Paddr,
608 mut len: usize,
609) -> impl Iterator<Item = (Paddr, PagingLevel)> {
610 assert_eq!(va % C::BASE_PAGE_SIZE(), 0);
611 assert_eq!(pa % C::BASE_PAGE_SIZE(), 0);
612 assert_eq!(len % C::BASE_PAGE_SIZE(), 0);
613 assert!(is_valid_range::<C>(&(va..(va + len))));
614
615 core::iter::from_fn(
616 move ||
617 {
618 if len == 0 {
619 return None;
620 }
621 let mut level = C::HIGHEST_TRANSLATION_LEVEL();
622 while page_size(level) > len || va % page_size(level) != 0 || pa % page_size(level)
623 != 0 {
624 level -= 1;
625 }
626
627 let item_start = pa;
628 va += page_size(level);
629 pa += page_size(level);
630 len -= page_size(level);
631
632 Some((item_start, level))
633 },
634 )
635}
636
637#[verifier::external_body]
642fn top_level_index_width<C: PageTableConfig>() -> usize {
643 C::ADDRESS_WIDTH() - pte_index_bit_offset::<C>(C::NR_LEVELS())
644}
645
/// Start of the virtual-address range covered by the configured top-level
/// index range, before any sign extension: `start_index << top_level_offset`.
#[verifier::external_body]
fn pt_va_range_start<C: PageTableConfig>() -> Vaddr {
    // NOTE(review): plain `<<` — assumes the top-level bit offset is below
    // usize::BITS for every valid config; confirm (cf. `unbounded_shl` below).
    C::TOP_LEVEL_INDEX_RANGE().start << pte_index_bit_offset::<C>(C::NR_LEVELS())
}
650
/// Inclusive end of the virtual-address range covered by the top-level index
/// range (before sign extension): `(end_index << offset) - 1`.
#[verifier::external_body]
fn pt_va_range_end<C: PageTableConfig>() -> Vaddr {
    // `unbounded_shl` yields 0 when the shift >= usize::BITS; the following
    // `wrapping_sub(1)` then produces usize::MAX — i.e. a full-width range
    // degrades gracefully instead of panicking on shift overflow.
    C::TOP_LEVEL_INDEX_RANGE().end.unbounded_shl(
        pte_index_bit_offset::<C>(C::NR_LEVELS()) as u32,
    ).wrapping_sub(1)
}
658
659#[verifier::external_body]
660fn sign_bit_of_va<C: PageTableConfig>(va: Vaddr) -> bool {
661 (va >> (C::ADDRESS_WIDTH() - 1)) & 1 != 0
662}
663
/// Spec: the bit position within a virtual address where the index for
/// `level` begins: page-offset bits plus `(level - 1)` index widths.
#[verifier::inline]
pub open spec fn pte_index_bit_offset_spec<C: PagingConstsTrait>(level: PagingLevel) -> int {
    (C::BASE_PAGE_SIZE().ilog2() as int) + (nr_pte_index_bits::<C>() as int) * (level as int - 1)
}
668
/// Spec: the half-open virtual-address range addressed by the configured
/// top-level index range (no sign extension applied here).
#[verifier::inline]
pub open spec fn vaddr_range_spec<C: PageTableConfig>() -> Range<Vaddr> {
    let idx_range = C::TOP_LEVEL_INDEX_RANGE_spec();
    let offset = pte_index_bit_offset_spec::<C::C>(C::NR_LEVELS()) as nat;
    // Each top-level index spans pow2(offset) bytes of virtual address space.
    let start = (idx_range.start as int) * pow2(offset);
    let end_inclusive = (idx_range.end as int) * pow2(offset) - 1;
    (start as Vaddr)..((end_inclusive + 1) as Vaddr)
}
681
/// Spec: a range is valid if it is the empty `0..0` range, or is non-empty
/// and lies entirely within [`vaddr_range_spec`] (compared via inclusive
/// upper bounds to avoid overflow at the top of the address space).
#[verifier::inline]
pub open spec fn is_valid_range_spec<C: PageTableConfig>(r: &Range<Vaddr>) -> bool {
    let va_range = vaddr_range_spec::<C>();
    (r.start == 0 && r.end == 0)
        || (va_range.start <= r.start && r.end > 0 && r.end - 1 <= va_range.end - 1)
}
689
/// Executable counterpart of [`vaddr_range_spec`] (trusted).
#[verifier::external_body]
fn vaddr_range<C: PageTableConfig>() -> (ret: Range<Vaddr>)
    ensures
        ret == vaddr_range_spec::<C>(),
{
    let mut start = pt_va_range_start::<C>();
    let mut end = pt_va_range_end::<C>();

    // If the config sign-extends addresses and the range sits in the upper
    // half, OR in all bits above ADDRESS_WIDTH.
    // NOTE(review): `1 << C::ADDRESS_WIDTH()` overflows when the width equals
    // usize::BITS; presumably no such config exists — confirm.
    if C::VA_SIGN_EXT() && sign_bit_of_va::<C>(pt_va_range_start::<C>()) {
        start |= !0 ^ ((1 << C::ADDRESS_WIDTH()) - 1);
        end |= !0 ^ ((1 << C::ADDRESS_WIDTH()) - 1);
    }
    // `end` is inclusive up to here; convert to a half-open range.
    // NOTE(review): `end + 1` overflows if `end == usize::MAX` (e.g. a range
    // reaching the top of the address space) — confirm callers avoid that.
    start..end + 1
}
719
720#[verifier::external_body]
722fn is_valid_range<C: PageTableConfig>(r: &Range<Vaddr>) -> (ret: bool)
723 ensures
724 ret == is_valid_range_spec::<C>(r),
725{
726 let va_range = vaddr_range::<C>();
727 (r.start == 0 && r.end == 0) || (va_range.start <= r.start && r.end - 1 <= va_range.end)
728}
729
/// For the kernel config, the (pre-sign-extension) address range is exactly
/// `[2^47, 2^48)`: top-level indices 256..512, each covering 2^39 bytes.
pub(crate) proof fn lemma_vaddr_range_spec_kernel()
    ensures
        vaddr_range_spec::<KernelPtConfig>()
            == (0x0000_8000_0000_0000_usize..0x0001_0000_0000_0000_usize),
{
    use vstd::arithmetic::power2::{lemma2_to64, lemma2_to64_rest, lemma_pow2_adds};
    lemma2_to64();
    lemma2_to64_rest();
    // BASE_PAGE_SIZE = 2^12, so ilog2 = 12.
    lemma_usize_pow2_ilog2(12);
    assert(PagingConsts::BASE_PAGE_SIZE().ilog2() == 12u32);
    assert(nr_subpage_per_huge::<PagingConsts>() == 512_usize);
    // 512 entries per node => 9 index bits per level.
    lemma_usize_pow2_ilog2(9);
    assert(nr_pte_index_bits::<PagingConsts>() == 9_usize);
    // Level-4 index starts at bit 12 + 9 * 3 = 39.
    assert(pte_index_bit_offset_spec::<PagingConsts>(4) == 39);
    lemma_pow2_adds(8, 39);
    lemma_pow2_adds(9, 39);
    assert((256 as int) * pow2(39) == pow2(47));
    assert((512 as int) * pow2(39) == pow2(48));
}
754
/// Spec: the (start, inclusive end) virtual-address bounds of a config,
/// including its leading (sign-extension) bits: `LEADING_BITS` shifted into
/// the top 16 bits plus the top-level index range scaled by the per-index
/// span `pow2(offset)`.
pub closed spec fn vaddr_range_bounds_spec<C: PageTableConfig>() -> (Vaddr, Vaddr) {
    let idx = C::TOP_LEVEL_INDEX_RANGE_spec();
    let off = pte_index_bit_offset_spec::<C::C>(C::NR_LEVELS()) as nat;
    let lb = C::LEADING_BITS_spec() as int;
    // LEADING_BITS occupies the bits above bit 48 (units of 2^48).
    let base = lb * 0x1_0000_0000_0000int;
    let start = (base + (idx.start as int) * pow2(off)) as usize;
    let end_inclusive = (base + (idx.end as int) * pow2(off) - 1) as usize;
    (start, end_inclusive)
}
773
/// For the user config (no leading bits, indices 0..256), the bounds are
/// `[0, 2^47 - 1]`.
pub(crate) proof fn lemma_vaddr_range_bounds_spec_user()
    ensures
        vaddr_range_bounds_spec::<crate::mm::vm_space::UserPtConfig>()
            == (0_usize, 0x0000_7FFF_FFFF_FFFF_usize),
{
    use vstd::arithmetic::power2::{lemma2_to64, lemma2_to64_rest, lemma_pow2_adds};
    lemma2_to64();
    lemma2_to64_rest();
    lemma_usize_pow2_ilog2(12);
    lemma_usize_pow2_ilog2(9);
    lemma_pow2_adds(8, 39);
    assert(nr_subpage_per_huge::<PagingConsts>() == 512_usize);
    assert(nr_pte_index_bits::<PagingConsts>() == 9_usize);
    // Level-4 index starts at bit 12 + 9 * 3 = 39.
    assert(pte_index_bit_offset_spec::<PagingConsts>(4) == 39);
    assert((0 as int) * pow2(39) == 0);
    assert((256 as int) * pow2(39) == pow2(47));
    assert(pow2(47) as int - 1 == 0x0000_7FFF_FFFF_FFFF_int);
    assert(<crate::mm::vm_space::UserPtConfig as PageTableConfig>::LEADING_BITS_spec() == 0_usize);
}
796
/// For the kernel config (leading bits 0xffff, indices 256..512), the bounds
/// are `[0xFFFF_8000_0000_0000, usize::MAX]`.
pub(crate) proof fn lemma_vaddr_range_bounds_spec_kernel()
    ensures
        vaddr_range_bounds_spec::<KernelPtConfig>()
            == (0xFFFF_8000_0000_0000_usize, 0xFFFF_FFFF_FFFF_FFFF_usize),
{
    use vstd::arithmetic::power2::{lemma2_to64, lemma2_to64_rest, lemma_pow2_adds};
    lemma2_to64();
    lemma2_to64_rest();
    lemma_usize_pow2_ilog2(12);
    lemma_usize_pow2_ilog2(9);
    lemma_pow2_adds(8, 39);
    lemma_pow2_adds(9, 39);
    assert(nr_subpage_per_huge::<PagingConsts>() == 512_usize);
    assert(nr_pte_index_bits::<PagingConsts>() == 9_usize);
    assert(PagingConsts::BASE_PAGE_SIZE().ilog2() == 12u32);
    // Level-4 index starts at bit 12 + 9 * 3 = 39.
    assert(pte_index_bit_offset_spec::<PagingConsts>(4) == 39);
    assert((256 as int) * pow2(39) == pow2(47));
    assert((512 as int) * pow2(39) == pow2(48));
    // Combine the 0xffff leading bits with the index-range bounds.
    assert(0xffff_int * 0x1_0000_0000_0000int + pow2(47) as int
        == 0xffff_8000_0000_0000int);
    assert(0xffff_int * 0x1_0000_0000_0000int + pow2(48) as int - 1
        == 0xffff_ffff_ffff_ffffint);
}
822
823
824
825#[verifier::external_body]
828fn pte_index<C: PagingConstsTrait>(va: Vaddr, level: PagingLevel) -> (res: usize)
829 ensures
830 res == AbstractVaddr::from_vaddr(va).index[level - 1],
831{
832 (va >> pte_index_bit_offset::<C>(level)) & (nr_subpage_per_huge::<C>() - 1)
833}
834
835#[verifier::external_body]
841fn pte_index_bit_offset<C: PagingConstsTrait>(level: PagingLevel) -> usize {
842 C::BASE_PAGE_SIZE().ilog2() as usize + nr_pte_index_bits::<C>() * (level as usize - 1)
843}
844
impl PageTable<KernelPtConfig> {
    /// Spec: the condition under which creating a user page table from this
    /// kernel root would fail: some top-level entry in the managed index
    /// range is either absent or maps a (huge) page instead of a child table.
    pub open spec fn create_user_pt_panic_condition(root_owner: NodeOwner<KernelPtConfig>) -> bool {
        exists|i: usize|
            #![trigger root_owner.children_perm.value()[i as int]]
            KernelPtConfig::TOP_LEVEL_INDEX_RANGE_spec().start <= i
                < KernelPtConfig::TOP_LEVEL_INDEX_RANGE_spec().end
            && {
                let pte = root_owner.children_perm.value()[i as int];
                ||| !pte.is_present()
                ||| pte.is_last(root_owner.level)
            }
    }

    /// Create the kernel page table (trusted stub).
    #[verifier::external_body]
    pub(crate) fn new_kernel_page_table() -> Self {
        unimplemented!()
    }

    /// Create a user page table whose top-level kernel-range entries alias
    /// the kernel root's child tables: allocate an empty table, lock both
    /// roots, then copy a `new_pt` entry for each kernel top-level index.
    #[verus_spec(r =>
        with Tracked(kernel_owner): Tracked<&PageTableOwner<KernelPtConfig>>,
        Tracked(regions): Tracked<&mut MetaRegionOwners>,
        Tracked(guards_k): Tracked<&mut Guards<'static, KernelPtConfig>>,
        Tracked(guards_u): Tracked<&mut Guards<'static, UserPtConfig>>,
        requires
            kernel_owner.inv(),
            old(regions).inv(),
            kernel_owner.0.value.node is Some,
            !Self::create_user_pt_panic_condition(kernel_owner.0.value.node.unwrap()),
            self.root.ptr.addr() == kernel_owner.0.value.node.unwrap().meta_perm.addr(),
            kernel_owner.0.value.metaregion_sound(*old(regions)),
            kernel_owner.metaregion_sound(*old(regions)),
            old(guards_k).unlocked(kernel_owner.0.value.node.unwrap().meta_perm.addr()),
    )]
    pub(in crate::mm) fn create_user_page_table<G: InAtomicMode + 'static>(&'static self) -> PageTable<UserPtConfig> {
        let preempt_guard: &G = disable_preempt::<G>();

        proof_decl! {
            let tracked mut new_pt_owner: Option<PageTableOwner<UserPtConfig>> = None;
        }
        let ghost regions_before_alloc = *regions;
        // Allocate the empty user table; this adds exactly one new slot.
        let new_pt: PageTable<UserPtConfig> = (
            #[verus_spec(with Tracked(&mut new_pt_owner), Tracked(regions), Tracked(guards_u))]
            PageTable::empty_with_owner()
        );
        let new_root = new_pt.root;
        let ghost new_idx_g: usize = crate::specs::mm::frame::mapping::frame_to_index(
            new_pt_owner@.unwrap().0.value.meta_slot_paddr().unwrap());
        let ghost new_pt_owner_snap = new_pt_owner@.unwrap();
        proof {
            // The kernel root is an active entry, so its slot cannot be the
            // freshly allocated one; its owner state is therefore unchanged.
            let kern_idx = crate::specs::mm::frame::mapping::frame_to_index(
                kernel_owner.0.value.meta_slot_paddr().unwrap());
            let new_idx = new_idx_g;
            crate::specs::mm::page_table::node::entry_owners::EntryOwner
                ::<KernelPtConfig>::active_entry_not_in_free_pool(
                kernel_owner.0.value, regions_before_alloc, new_idx);
            assert(kern_idx != new_idx);
            assert(regions.slot_owners[kern_idx]
                == regions_before_alloc.slot_owners[kern_idx]);
            assert(kernel_owner.metaregion_sound(*regions));
            assert(!regions.slots.contains_key(new_idx));
        }

        proof_decl! {
            let tracked root_owner: &NodeOwner<KernelPtConfig>
                = kernel_owner.0.borrow_value().node.tracked_borrow();
            let tracked root_perm: &MetaPerm<PageTablePageMeta<KernelPtConfig>>
                = &root_owner.meta_perm;
            let tracked mut new_pt_owner_val: PageTableOwner<UserPtConfig>
                = new_pt_owner.tracked_take();
            let tracked mut new_node_owner: NodeOwner<UserPtConfig>
                = new_pt_owner_val.0.value.node.tracked_take();
            let tracked mut root_guard_perm: GuardPerm<'static, KernelPtConfig>;
            let tracked mut new_guard_perm_lock: GuardPerm<'static, UserPtConfig>;
            let tracked mut entry_owner: &EntryOwner<KernelPtConfig>;
        }

        proof {
            assert(kernel_owner.0.value.is_node());
            assert(kernel_owner.0.value.metaregion_sound(*regions));
        }
        let ghost regions_before_self_borrow: MetaRegionOwners = *regions;
        // Lock the kernel root node.
        let root_node = {
            #[verus_spec(with Tracked(regions), Tracked(root_perm))]
            let root_ref = self.root.borrow();
            #[verus_spec(with Tracked(root_owner), Tracked(guards_k) => Tracked(root_guard_perm))]
            root_ref.lock(preempt_guard)
        };
        let ghost regions_after_kroot_borrow: MetaRegionOwners = *regions;
        // Lock the freshly allocated user root node.
        let new_node: vstd::simple_pptr::PPtr<PageTableGuard<'static, UserPtConfig>> = {
            #[verus_spec(with Tracked(regions), Tracked(&new_node_owner.meta_perm))]
            let new_ref = new_root.borrow();
            #[verus_spec(with Tracked(&new_node_owner), Tracked(guards_u) => Tracked(new_guard_perm_lock))]
            new_ref.lock(preempt_guard)
        };
        proof {
            // Re-establish the kernel owner's metaregion soundness across the
            // two borrows: the kernel-root borrow leaves slot owners equal,
            // and the user-root borrow changes only the new slot.
            let kern_idx = crate::specs::mm::frame::mapping::frame_to_index(
                kernel_owner.0.value.meta_slot_paddr().unwrap());
            assert(regions_before_self_borrow.slot_owners
                =~= regions_after_kroot_borrow.slot_owners) by {
                assert(regions_before_self_borrow.slot_owners[kern_idx].raw_count == 1);
                assert(regions_after_kroot_borrow.slot_owners[kern_idx].raw_count == 1);
            }
            assert forall |k: usize|
                regions_before_self_borrow.slots.contains_key(k)
                implies regions_before_self_borrow.slots[k] == #[trigger] regions_after_kroot_borrow.slots[k]
            by {
                if k == kern_idx {
                    crate::specs::mm::page_table::node::entry_owners::EntryOwner
                        ::<KernelPtConfig>::active_entry_not_in_free_pool(
                        kernel_owner.0.value, regions_before_self_borrow, k);
                }
            };
            kernel_owner.metaregion_sound_preserved_slot_owners_eq(
                regions_before_self_borrow, regions_after_kroot_borrow);
            let new_idx = new_idx_g;
            assert(regions_before_alloc.slots.contains_key(new_idx));
            assert(kern_idx != new_idx) by {
                crate::specs::mm::page_table::node::entry_owners::EntryOwner
                    ::<KernelPtConfig>::active_entry_not_in_free_pool(
                    kernel_owner.0.value, regions_before_alloc, new_idx);
            };
            assert(!regions_before_self_borrow.slots.contains_key(new_idx));
            assert(!regions_after_kroot_borrow.slots.contains_key(new_idx));
            assert forall |k: usize|
                regions_after_kroot_borrow.slots.contains_key(k)
                implies regions_after_kroot_borrow.slots[k] == #[trigger] regions.slots[k]
            by {
                if k != new_idx {
                }
            };
            assert(kernel_owner.metaregion_sound(regions_before_alloc));
            // Strengthen the per-entry frame predicate: huge-frame sub-pages
            // never coincide with the newly allocated slot, hence (vacuously)
            // their ref counts remain in the required state.
            kernel_owner.0.map_implies(
                kernel_owner.0.value.path,
                |e: crate::specs::mm::page_table::node::entry_owners::EntryOwner<KernelPtConfig>,
                 p: vstd_extra::ghost_tree::TreePath<NR_ENTRIES>|
                    e.is_frame() && e.parent_level > 1 ==> {
                        let pa = e.frame.unwrap().mapped_pa;
                        let nr_pages = crate::mm::page_table::cursor::page_size_spec(
                            e.parent_level) / crate::specs::arch::mm::PAGE_SIZE;
                        forall |j: usize| 0 < j < nr_pages ==> {
                            let sub_idx =
                                #[trigger] crate::specs::mm::frame::mapping::frame_to_index(
                                (pa + j * crate::specs::arch::mm::PAGE_SIZE) as usize);
                            sub_idx != new_idx
                        }
                    },
                |e: crate::specs::mm::page_table::node::entry_owners::EntryOwner<KernelPtConfig>,
                 p: vstd_extra::ghost_tree::TreePath<NR_ENTRIES>|
                    e.is_frame() && e.parent_level > 1 ==> {
                        let pa = e.frame.unwrap().mapped_pa;
                        let nr_pages = crate::mm::page_table::cursor::page_size_spec(
                            e.parent_level) / crate::specs::arch::mm::PAGE_SIZE;
                        forall |j: usize| 0 < j < nr_pages ==> {
                            let sub_idx =
                                #[trigger] crate::specs::mm::frame::mapping::frame_to_index(
                                (pa + j * crate::specs::arch::mm::PAGE_SIZE) as usize);
                            sub_idx != new_idx
                            || (
                                regions.slots.contains_key(sub_idx)
                                && regions.slot_owners[sub_idx].inner_perms.ref_count.value()
                                    != crate::specs::mm::frame::meta_owners::REF_COUNT_UNUSED
                                && regions.slot_owners[sub_idx].inner_perms.ref_count.value() > 0
                            )
                        }
                    },
            );
            kernel_owner.metaregion_sound_preserved_one_slot_changed(
                regions_after_kroot_borrow, *regions, new_idx);
        }
        // Copy each kernel top-level entry into the user root as a child-PT
        // entry pointing at the same child table.
        let mut i: usize = KernelPtConfig::TOP_LEVEL_INDEX_RANGE().start;
        while i < KernelPtConfig::TOP_LEVEL_INDEX_RANGE().end
            invariant
                kernel_owner.inv(),
                kernel_owner.0.value.node is Some,
                regions.inv(),
                !Self::create_user_pt_panic_condition(kernel_owner.0.value.node.unwrap()),
                i <= KernelPtConfig::TOP_LEVEL_INDEX_RANGE_spec().end,
                KernelPtConfig::TOP_LEVEL_INDEX_RANGE_spec().start <= i,
                *root_owner == kernel_owner.0.value.node.unwrap(),
                root_owner.relate_guard_perm(root_guard_perm),
                root_node.addr() == root_guard_perm.addr(),
                kernel_owner.metaregion_sound(*regions),
                new_guard_perm_lock.pptr() == new_node,
                new_guard_perm_lock.is_init(),
                new_node_owner.inv(),
                new_node_owner.relate_guard_perm(new_guard_perm_lock),
            decreases KernelPtConfig::TOP_LEVEL_INDEX_RANGE_spec().end - i,
        {
            proof {
                // From the negated panic condition: every in-range top-level
                // PTE is present and not a leaf.
                let kern_node = kernel_owner.0.value.node.unwrap();
                assert forall |j: usize|
                    #![trigger kern_node.children_perm.value()[j as int]]
                    KernelPtConfig::TOP_LEVEL_INDEX_RANGE_spec().start <= j
                        < KernelPtConfig::TOP_LEVEL_INDEX_RANGE_spec().end
                implies {
                    let pte = kern_node.children_perm.value()[j as int];
                    pte.is_present() && !pte.is_last(kern_node.level)
                } by {
                    let pte = kern_node.children_perm.value()[j as int];
                    if !pte.is_present() || pte.is_last(kern_node.level) {
                        assert(Self::create_user_pt_panic_condition(kern_node));
                    }
                }

                // Borrow the tracked owner for child entry `i`.
                kernel_owner.pt_inv_unroll(i as int);
                let tracked child_opt: &Option<OwnerSubtree<KernelPtConfig>>
                    = kernel_owner.0.children.tracked_borrow(i as int);
                let tracked child_subtree: &OwnerSubtree<KernelPtConfig>
                    = child_opt.tracked_borrow();
                entry_owner = child_subtree.borrow_value();
                let kern_node = kernel_owner.0.value.node.unwrap();
                assert(entry_owner.match_pte(
                    kern_node.children_perm.value()[i as int],
                    entry_owner.parent_level));
                assert(entry_owner.parent_level == kern_node.level);
                assert(child_subtree.inv());
                assert(entry_owner.inv());
                assert(!entry_owner.in_scope);
                assert(root_owner.relate_guard_perm(root_guard_perm));
                assert(root_guard_perm.addr() == root_node.addr());
                kernel_owner.0.map_unroll_once(
                    kernel_owner.0.value.path,
                    PageTableOwner::<KernelPtConfig>::metaregion_sound_pred(*regions),
                    i as int);
                assert(child_subtree.tree_predicate_map(
                    kernel_owner.0.value.path.push_tail(i as usize),
                    PageTableOwner::<KernelPtConfig>::metaregion_sound_pred(*regions)));
                assert(entry_owner.metaregion_sound(*regions));
            }

            #[verus_spec(with Tracked(root_owner), Tracked(entry_owner), Tracked(&root_guard_perm))]
            let root_entry = PageTableGuard::entry(root_node, i);
            let ghost pre_to_ref_regions: MetaRegionOwners = *regions;
            #[verus_spec(with Tracked(entry_owner), Tracked(root_owner), Tracked(regions), Tracked(&root_guard_perm))]
            let child = root_entry.to_ref();

            proof {
                // The entry must be a child page table (not absent, not a
                // page), again by the negated panic condition.
                let kern_node = kernel_owner.0.value.node.unwrap();
                let pte = kern_node.children_perm.value()[i as int];
                assert(pte.is_present() && !pte.is_last(kern_node.level)) by {
                    if !pte.is_present() || pte.is_last(kern_node.level) {
                        assert(KernelPtConfig::TOP_LEVEL_INDEX_RANGE_spec().start <= i
                            < KernelPtConfig::TOP_LEVEL_INDEX_RANGE_spec().end);
                        assert(exists |j: usize|
                            KernelPtConfig::TOP_LEVEL_INDEX_RANGE_spec().start <= j
                                < KernelPtConfig::TOP_LEVEL_INDEX_RANGE_spec().end
                            && {
                                let p = #[trigger] kern_node.children_perm.value()[j as int];
                                ||| !p.is_present()
                                ||| p.is_last(kern_node.level)
                            });
                        assert(Self::create_user_pt_panic_condition(kern_node));
                    }
                }
                assert(entry_owner.is_node());
                assert(child is PageTable);
                kernel_owner.metaregion_sound_preserved_slot_owners_eq(
                    pre_to_ref_regions, *regions);
            }
            let pt = match child {
                ChildRef::PageTable(pt) => pt,
                // Unreachable: `child is PageTable` was just proven above.
                _ => vstd::pervasive::unreached(),
            };

            #[verus_spec(with Tracked(&entry_owner.node.tracked_borrow().meta_perm.points_to))]
            let pt_addr = pt.start_paddr();
            let pte = PageTableEntry::new_pt(pt_addr);

            // Write the aliasing entry into the user root.
            let mut new_guard = new_node.take(Tracked(&mut new_guard_perm_lock));
            #[verus_spec(with Tracked(&mut new_node_owner))]
            new_guard.write_pte(i, pte);
            new_node.put(Tracked(&mut new_guard_perm_lock), new_guard);

            i = i + 1;
        }

        PageTable::<UserPtConfig> { root: new_root }
    }

}
1256
#[verus_verify]
impl<C: PageTableConfig> PageTable<C> {
    /// Ghost (spec-level) view of the physical address of this page table's
    /// root node. Uninterpreted; `root_paddr` below is wired to it via
    /// `when_used_as_spec`.
    pub uninterp spec fn root_paddr_spec(&self) -> Paddr;

    /// Creates an empty page table. Body is outside the verifier
    /// (`external_body`) and is currently an unimplemented stub.
    #[verifier::external_body]
    pub fn empty() -> Self {
        unimplemented!()
    }

    /// Creates an empty page table and materializes the tracked ownership
    /// state describing it.
    ///
    /// Per the `verus_spec` ensures clauses:
    /// - `owner` ends up `Some`, satisfies its invariant, and denotes a node
    ///   whose metadata address matches the returned root pointer's address;
    /// - the new root's meta-slot index is present in `old(regions).slots`
    ///   but absent from `final(regions).slots` (the slot permission is
    ///   consumed), while every other slot owner is left unchanged;
    /// - previously held guard locks stay held and the new root's slot is
    ///   unlocked in `guards`;
    /// - per-slot `paths_in_pt` is preserved for all indices;
    /// - soundness of any kernel page-table owner w.r.t. `regions` is
    ///   preserved, and — for any sound kernel tree — no entry's meta slot
    ///   aliases the new root's meta slot, nor does any subpage of a huge
    ///   frame mapping (levels > 1).
    ///
    /// Body is external and currently an unimplemented stub.
    #[verifier::external_body]
    #[verus_spec(r =>
        with Tracked(owner): Tracked<&mut Option<PageTableOwner<C>>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'static, C>>,
        requires
            old(regions).inv(),
        ensures
            final(owner)@ is Some,
            final(owner)@.unwrap().inv(),
            final(owner)@.unwrap().0.value.is_node(),
            final(owner)@.unwrap().0.value.node is Some,
            r.root.ptr.addr() == final(owner)@.unwrap().0.value.node.unwrap().meta_perm.addr(),
            final(owner)@.unwrap().0.value.metaregion_sound(*final(regions)),
            final(regions).inv(),
            final(guards).unlocked(final(owner)@.unwrap().0.value.node.unwrap().meta_perm.addr()),
            old(regions).slots.contains_key(
                crate::specs::mm::frame::mapping::frame_to_index(
                    final(owner)@.unwrap().0.value.meta_slot_paddr().unwrap())),
            !final(regions).slots.contains_key(
                crate::specs::mm::frame::mapping::frame_to_index(
                    final(owner)@.unwrap().0.value.meta_slot_paddr().unwrap())),
            forall |i: usize| #![trigger final(regions).slot_owners[i]]
                i != crate::specs::mm::frame::mapping::frame_to_index(
                    final(owner)@.unwrap().0.value.meta_slot_paddr().unwrap())
                ==> final(regions).slot_owners[i] == old(regions).slot_owners[i],
            forall |a: usize| old(guards).lock_held(a) ==> final(guards).lock_held(a),
            forall |idx: usize| #![trigger final(regions).slot_owners[idx].paths_in_pt]
                final(regions).slot_owners[idx].paths_in_pt
                    == old(regions).slot_owners[idx].paths_in_pt,
            forall |kt: PageTableOwner<KernelPtConfig>|
                #![trigger kt.metaregion_sound(*final(regions))]
                kt.inv() && kt.metaregion_sound(*old(regions))
                    ==> kt.metaregion_sound(*final(regions)),
            forall |kt: PageTableOwner<KernelPtConfig>|
                #![trigger kt.metaregion_sound(*old(regions))]
                kt.inv() && kt.metaregion_sound(*old(regions)) ==>
                kt.0.tree_predicate_map(
                    kt.0.value.path,
                    |e: crate::specs::mm::page_table::node::entry_owners::EntryOwner<KernelPtConfig>,
                        p: vstd_extra::ghost_tree::TreePath<NR_ENTRIES>|
                        e.meta_slot_paddr() is Some
                            ==> crate::specs::mm::frame::mapping::frame_to_index(
                                e.meta_slot_paddr().unwrap()) !=
                                crate::specs::mm::frame::mapping::frame_to_index(
                                    final(owner)@.unwrap().0.value.meta_slot_paddr().unwrap()),
                ),
            forall |kt: PageTableOwner<KernelPtConfig>|
                #![trigger kt.metaregion_sound(*old(regions))]
                kt.inv() && kt.metaregion_sound(*old(regions)) ==>
                kt.0.tree_predicate_map(
                    kt.0.value.path,
                    |e: crate::specs::mm::page_table::node::entry_owners::EntryOwner<KernelPtConfig>,
                        p: vstd_extra::ghost_tree::TreePath<NR_ENTRIES>|
                        e.is_frame() && e.parent_level > 1 ==> {
                            let pa = e.frame.unwrap().mapped_pa;
                            let nr_pages = crate::mm::page_table::cursor::page_size_spec(
                                e.parent_level) / crate::specs::arch::mm::PAGE_SIZE;
                            forall |j: usize| 0 < j < nr_pages ==> {
                                let sub_idx =
                                    #[trigger] crate::specs::mm::frame::mapping::frame_to_index(
                                        (pa + j * crate::specs::arch::mm::PAGE_SIZE) as usize);
                                sub_idx != crate::specs::mm::frame::mapping::frame_to_index(
                                    final(owner)@.unwrap().0.value.meta_slot_paddr().unwrap())
                            }
                        },
                ),
    )]
    pub fn empty_with_owner() -> Self {
        unimplemented!()
    }

    /// Activates this page table for the first time.
    ///
    /// # Safety
    /// `unsafe` per the original declaration; the exact caller obligations
    /// are not visible here — presumably tied to hardware activation of the
    /// root (TODO confirm against the implementation). Body is an external
    /// stub.
    #[verifier::external_body]
    pub(in crate::mm) unsafe fn first_activate_unchecked(&self) {
        unimplemented!()
    }

    /// Returns the physical address of the root node. In spec contexts this
    /// call is interpreted as `root_paddr_spec` (`when_used_as_spec`), and
    /// the `returns` clause ties the runtime result to that spec value.
    /// Body is an external stub.
    #[verifier::external_body]
    #[verifier::when_used_as_spec(root_paddr_spec)]
    pub fn root_paddr(&self) -> (r: Paddr)
        returns
            self.root_paddr_spec(),
    {
        unimplemented!()
    }

    /// Test-only convenience: software-walks this table for `vaddr` by
    /// delegating to the free function `page_walk` with our root address.
    #[cfg(ktest)]
    pub fn page_walk(&self, vaddr: Vaddr) -> Option<(Paddr, PageProperty)> {
        unsafe { page_walk::<C>(self.root_paddr(), vaddr) }
    }

    /// Acquires a mutable cursor over the virtual range `va`.
    ///
    /// Per the ensures: when `cursor_new_success_conditions(va)` holds, the
    /// result is `Ok`, the inner cursor satisfies its invariants against the
    /// final region/guard state, starts at `va.start`, is bounded by `*va`,
    /// sits below its guard level, and the guard level is `NR_LEVELS`.
    /// Items not mapped before the call remain not mapped, and per-slot
    /// `paths_in_pt` is unchanged.
    ///
    /// NOTE(review): the `requires` section is empty — presumably all
    /// preconditions are folded into `cursor_new_success_conditions` on the
    /// ensures side; confirm this is intentional.
    #[verus_spec(r =>
        with Tracked(owner): Tracked<PageTableOwner<C>>,
            Tracked(guard_perm): Tracked<GuardPerm<'rcu, C>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'rcu, C>>
        requires
        ensures
            Cursor::<C, G>::cursor_new_success_conditions(va) ==> {
                &&& r is Ok
                &&& r.unwrap().0.inner.invariants(*r.unwrap().1, *final(regions), *final(guards))
                &&& r.unwrap().1.in_locked_range()
                &&& r.unwrap().0.inner.level < r.unwrap().0.inner.guard_level
                &&& r.unwrap().0.inner.guard_level == NR_LEVELS as PagingLevel
                &&& r.unwrap().0.inner.va < r.unwrap().0.inner.barrier_va.end
                &&& r.unwrap().0.inner.va == va.start
                &&& r.unwrap().0.inner.barrier_va == *va
            },
            forall |item: C::Item| #![trigger CursorMut::<'rcu, C, G>::item_not_mapped(item, *old(regions))]
                CursorMut::<'rcu, C, G>::item_not_mapped(item, *old(regions)) ==>
                CursorMut::<'rcu, C, G>::item_not_mapped(item, *final(regions)),
            forall |idx: usize| #![auto]
                (*final(regions)).slot_owners[idx].paths_in_pt == (*old(regions)).slot_owners[idx].paths_in_pt,
    )]
    #[verifier::external_body]
    pub fn cursor_mut<'rcu, G: InAtomicMode>(
        &'rcu self,
        guard: &'rcu G,
        va: &Range<Vaddr>,
    ) -> Result<(CursorMut<'rcu, C, G>, Tracked<CursorOwner<'rcu, C>>), PageTableError> {
        // Thin wrapper: threads the tracked state through to CursorMut::new.
        #[verus_spec(with Tracked(owner), Tracked(guard_perm), Tracked(regions), Tracked(guards))]
        CursorMut::new(self, guard, va)
    }

    /// Acquires a read cursor over the virtual range `va`.
    ///
    /// Per the ensures: success/failure is fully determined by
    /// `cursor_new_success_conditions(va)` (failure yields `Err`). On
    /// success the cursor satisfies its invariants, starts at `va.start`,
    /// is bounded by `*va`, and its owner view matches the supplied `owner`
    /// (including the path of continuation 3). `paths_in_pt` is preserved
    /// for every slot whose ref count is in use, and the "ref count has
    /// headroom below REF_COUNT_MAX" property is carried from the old to
    /// the final region state.
    #[verus_spec(r =>
        with Tracked(owner): Tracked<PageTableOwner<C>>,
            Tracked(guard_perm): Tracked<GuardPerm<'rcu, C>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'rcu, C>>
        requires
            owner.inv(),
        ensures
            Cursor::<C, G>::cursor_new_success_conditions(va) ==> {
                &&& r is Ok
                &&& r.unwrap().0.invariants(*r.unwrap().1, *final(regions), *final(guards))
                &&& r.unwrap().1.in_locked_range()
                &&& r.unwrap().0.level < r.unwrap().0.guard_level
                &&& r.unwrap().0.va < r.unwrap().0.barrier_va.end
                &&& r.unwrap().0.va == va.start
                &&& r.unwrap().0.barrier_va == *va
                &&& r.unwrap().1@.as_page_table_owner() == owner
                &&& r.unwrap().1@.continuations[3].path() == owner.0.value.path
            },
            !Cursor::<C, G>::cursor_new_success_conditions(va) ==> r is Err,
            forall|idx: usize| #![trigger final(regions).slot_owners[idx].paths_in_pt]
                old(regions).slot_owners[idx].inner_perms.ref_count.value()
                    != crate::specs::mm::frame::meta_owners::REF_COUNT_UNUSED
                ==> final(regions).slot_owners[idx].paths_in_pt
                    == old(regions).slot_owners[idx].paths_in_pt,
            (forall |i: usize| #![trigger old(regions).slot_owners[i]]
                old(regions).slot_owners.contains_key(i)
                && old(regions).slot_owners[i].inner_perms.ref_count.value()
                    != crate::specs::mm::frame::meta_owners::REF_COUNT_UNUSED
                ==> old(regions).slot_owners[i].inner_perms.ref_count.value() + 1
                    < crate::specs::mm::frame::meta_owners::REF_COUNT_MAX)
            ==>
            (forall |i: usize| #![trigger final(regions).slot_owners[i]]
                final(regions).slot_owners.contains_key(i)
                && final(regions).slot_owners[i].inner_perms.ref_count.value()
                    != crate::specs::mm::frame::meta_owners::REF_COUNT_UNUSED
                ==> final(regions).slot_owners[i].inner_perms.ref_count.value() + 1
                    < crate::specs::mm::frame::meta_owners::REF_COUNT_MAX),
    )]
    pub fn cursor<'rcu, G: InAtomicMode>(&'rcu self, guard: &'rcu G, va: &Range<Vaddr>)
        -> Result<(Cursor<'rcu, C, G>, Tracked<CursorOwner<'rcu, C>>), PageTableError> {
        // Thin wrapper: threads the tracked state through to Cursor::new.
        #[verus_spec(with Tracked(owner), Tracked(guard_perm), Tracked(regions), Tracked(guards))]
        Cursor::new(self, guard, va)
    }

}
1494
/// Test-only (`ktest`) software walk of the page table rooted at
/// `root_paddr`, translating `vaddr`.
///
/// Walks from level `C::NR_LEVELS()` down to 1, loading one PTE per level
/// with `Ordering::Acquire`. Returns `None` at the first non-present PTE;
/// returns `Some((paddr, prop))` when a last-level PTE is reached, where
/// `paddr` is the PTE's physical address plus the page-offset bits of
/// `vaddr` for that level. Panics (`unreachable!`) if a present level-1 PTE
/// is not a last-level PTE.
///
/// # Safety
/// Caller must ensure `root_paddr` is the root of a valid page table whose
/// nodes are accessible through `paddr_to_vaddr` for the duration of the
/// walk. Preemption is disabled internally (`disable_preempt`), but
/// concurrent mutation of the tables is not otherwise excluded — presumably
/// acceptable for tests only; confirm.
#[cfg(ktest)]
pub(super) unsafe fn page_walk<C: PageTableConfig>(root_paddr: Paddr, vaddr: Vaddr) -> Option<
    (Paddr, PageProperty),
> {
    use super::paddr_to_vaddr;

    // Hold off preemption while dereferencing table memory.
    let _rcu_guard = disable_preempt();

    let mut pt_addr = paddr_to_vaddr(root_paddr);
    #[verusfmt::skip]
    for cur_level in (1..= C::NR_LEVELS()).rev() {
        let offset = pte_index::<C>(vaddr, cur_level);
        // Acquire-load the PTE at this level's slot for `vaddr`.
        let cur_pte = unsafe { load_pte((pt_addr as *mut C::E).add(offset), Ordering::Acquire) };

        if !cur_pte.is_present() {
            return None;
        }
        if cur_pte.is_last(cur_level) {
            debug_assert!(cur_level <= C::HIGHEST_TRANSLATION_LEVEL);
            // Combine the frame base with the in-page offset of `vaddr`.
            return Some(
                (cur_pte.paddr() + (vaddr & (page_size::<C>(cur_level) - 1)), cur_pte.prop()),
            );
        }
        // Descend: the PTE points at the next-level table.
        pt_addr = paddr_to_vaddr(cur_pte.paddr());
    }

    unreachable!("All present PTEs at the level 1 must be last-level PTEs");
}
1550
/// Verified wrapper for loading the PTE at slot `ptr.index` of a page-table
/// node's entry array.
///
/// Spec: requires a tracked `PointsTo` permission covering `ptr`'s address,
/// initialized at `ptr.index`, with `0 <= ptr.index < NR_ENTRIES`; the
/// `returns` clause pins the result to exactly `perm.value()[ptr.index]`.
/// The `ordering` argument only affects the (external) runtime load; the
/// body is currently an unimplemented stub.
#[verifier::external_body]
#[verus_spec(
    with Tracked(perm): Tracked<&vstd_extra::array_ptr::PointsTo<E, NR_ENTRIES>>
    requires
        perm.is_init(ptr.index as int),
        perm.addr() == ptr.addr(),
        0 <= ptr.index < NR_ENTRIES,
    returns
        perm.value()[ptr.index as int],
)]
pub fn load_pte<E: PageTableEntryTrait>(
    ptr: vstd_extra::array_ptr::ArrayPtr<E, NR_ENTRIES>,
    ordering: Ordering,
) -> (pte: E) {
    unimplemented!()
}
1581
/// Verified wrapper for storing `new_val` into slot `ptr.index` of a
/// page-table node's entry array.
///
/// Spec: takes a mutable tracked `PointsTo` permission whose address
/// matches `ptr`, fully initialized, with `0 <= ptr.index < NR_ENTRIES`.
/// Afterwards the value array equals the old array updated at `ptr.index`
/// with `new_val`; the address is unchanged and all entries stay
/// initialized. Declared without a body (external); the `ordering`
/// argument only affects the runtime store.
#[verifier::external_body]
#[verus_spec(
    with Tracked(perm): Tracked<&mut vstd_extra::array_ptr::PointsTo<E, NR_ENTRIES>>
    requires
        old(perm).addr() == ptr.addr(),
        0 <= ptr.index < NR_ENTRIES,
        old(perm).is_init_all(),
    ensures
        final(perm).value()[ptr.index as int] == new_val,
        final(perm).value() == old(perm).value().update(ptr.index as int, new_val),
        final(perm).addr() == old(perm).addr(),
        final(perm).is_init_all(),
)]
pub fn store_pte<E: PageTableEntryTrait>(
    ptr: vstd_extra::array_ptr::ArrayPtr<E, NR_ENTRIES>,
    new_val: E,
    ordering: Ordering,
);
1612
1613}