pub struct PageTable<C: PageTableConfig> {
pub root: PageTableNode<C>,
}
Expand description
A handle to a page table. A page table can track the lifetime of the mapped physical pages.
Fields§
§root: PageTableNode<C>
Implementations§
Source§
impl PageTable<KernelPtConfig>
impl PageTable<KernelPtConfig>
Source
pub open spec fn create_user_pt_panic_condition(
root_owner: NodeOwner<KernelPtConfig>,
) -> bool
pub open spec fn create_user_pt_panic_condition( root_owner: NodeOwner<KernelPtConfig>, ) -> bool
{
exists |i: usize| {
KernelPtConfig::TOP_LEVEL_INDEX_RANGE_spec().start <= i
< KernelPtConfig::TOP_LEVEL_INDEX_RANGE_spec().end
&& {
let pte = root_owner.children_perm.value()[i as int];
||| !pte.is_present()
||| pte.is_last(root_owner.level)
}
}
}
Panic condition for Self::create_user_page_table:
Some kernel root entry at index i in TOP_LEVEL_INDEX_RANGE is
not a page table node (i.e., is absent or maps a huge frame).
Source§
impl<C: PageTableConfig> PageTable<C>
impl<C: PageTableConfig> PageTable<C>
Source
pub uninterp fn root_paddr_spec(&self) -> Paddr
pub uninterp fn root_paddr_spec(&self) -> Paddr
Source
pub exec fn empty() -> Self
pub exec fn empty() -> Self
Create a new empty page table.
Useful for the IOMMU page tables only.
Source
pub exec fn empty_with_owner() -> r : Self
pub exec fn empty_with_owner() -> r : Self
Tracked(owner): Tracked<&mut Option<PageTableOwner<C>>>,
Tracked(regions): Tracked<&mut MetaRegionOwners>,
Tracked(guards): Tracked<&mut Guards<'static, C>>,
requires
old(regions).inv(),
ensures
final(owner)@ is Some,
final(owner)@.unwrap().inv(),
final(owner)@.unwrap().0.value.is_node(),
final(owner)@.unwrap().0.value.node is Some,
r.root.ptr.addr() == final(owner)@.unwrap().0.value.node.unwrap().meta_perm.addr(),
final(owner)@.unwrap().0.value.metaregion_sound(*final(regions)),
final(regions).inv(),
final(guards).unlocked(final(owner)@.unwrap().0.value.node.unwrap().meta_perm.addr()),
old(regions)
.slots
.contains_key(
crate::specs::mm::frame::mapping::frame_to_index(
final(owner)@.unwrap().0.value.meta_slot_paddr().unwrap(),
),
),
!final(regions)
.slots
.contains_key(
crate::specs::mm::frame::mapping::frame_to_index(
final(owner)@.unwrap().0.value.meta_slot_paddr().unwrap(),
),
),
forall |i: usize| {
i
!= crate::specs::mm::frame::mapping::frame_to_index(
final(owner)@.unwrap().0.value.meta_slot_paddr().unwrap(),
) ==> final(regions).slot_owners[i] == old(regions).slot_owners[i]
},
forall |a: usize| old(guards).lock_held(a) ==> final(guards).lock_held(a),
forall |idx: usize| (
final(regions).slot_owners[idx].paths_in_pt
== old(regions).slot_owners[idx].paths_in_pt
),
forall |kt: PageTableOwner<KernelPtConfig>| {
kt.inv() && kt.metaregion_sound(*old(regions))
==> kt.metaregion_sound(*final(regions))
},
forall |kt: PageTableOwner<KernelPtConfig>| {
kt.inv() && kt.metaregion_sound(*old(regions))
==> kt
.0
.tree_predicate_map(
kt.0.value.path,
|
e: crate::specs::mm::page_table::node::entry_owners::EntryOwner<
KernelPtConfig,
>,
p: vstd_extra::ghost_tree::TreePath<NR_ENTRIES>|
(
e.meta_slot_paddr() is Some
==> crate::specs::mm::frame::mapping::frame_to_index(
e.meta_slot_paddr().unwrap(),
)
!= crate::specs::mm::frame::mapping::frame_to_index(
final(owner)@.unwrap().0.value.meta_slot_paddr().unwrap(),
)
),
)
},
forall |kt: PageTableOwner<KernelPtConfig>| {
kt.inv() && kt.metaregion_sound(*old(regions))
==> kt
.0
.tree_predicate_map(
kt.0.value.path,
|
e: crate::specs::mm::page_table::node::entry_owners::EntryOwner<
KernelPtConfig,
>,
p: vstd_extra::ghost_tree::TreePath<NR_ENTRIES>|
{
e.is_frame() && e.parent_level > 1
==> {
let pa = e.frame.unwrap().mapped_pa;
let nr_pages = crate::mm::page_table::cursor::page_size_spec(
e.parent_level,
) / crate::specs::arch::mm::PAGE_SIZE;
forall |j: usize| {
0 < j < nr_pages
==> {
let sub_idx = #[trigger]
crate::specs::mm::frame::mapping::frame_to_index(
(pa + j * crate::specs::arch::mm::PAGE_SIZE) as usize,
);
sub_idx
!= crate::specs::mm::frame::mapping::frame_to_index(
final(owner)@.unwrap().0.value.meta_slot_paddr().unwrap(),
)
}
}
}
},
)
},
Create a new empty page table together with its tracked ownership.
Source
pub exec fn root_paddr(&self) -> r : Paddr
pub exec fn root_paddr(&self) -> r : Paddr
self.root_paddr_spec(),
The physical address of the root page table.
Obtaining the physical address of the root page table is safe, however, using it or providing it to the hardware will be unsafe since the page table node may be dropped, resulting in UAF.
Source
pub exec fn cursor_mut<'rcu, G: InAtomicMode>(
&'rcu self,
guard: &'rcu G,
va: &Range<Vaddr>,
) -> r : Result<(CursorMut<'rcu, C, G>, Tracked<CursorOwner<'rcu, C>>), PageTableError>
pub exec fn cursor_mut<'rcu, G: InAtomicMode>( &'rcu self, guard: &'rcu G, va: &Range<Vaddr>, ) -> r : Result<(CursorMut<'rcu, C, G>, Tracked<CursorOwner<'rcu, C>>), PageTableError>
Tracked(owner): Tracked<PageTableOwner<C>>,
Tracked(guard_perm): Tracked<GuardPerm<'rcu, C>>,
Tracked(regions): Tracked<&mut MetaRegionOwners>,
Tracked(guards): Tracked<&mut Guards<'rcu, C>>,
ensures
Cursor::<C, G>::cursor_new_success_conditions(va)
==> {
&&& r is Ok
&&& r.unwrap().0.inner.invariants(*r.unwrap().1, *final(regions), *final(guards))
&&& r.unwrap().1.in_locked_range()
&&& r.unwrap().0.inner.level < r.unwrap().0.inner.guard_level
&&& r.unwrap().0.inner.guard_level == NR_LEVELS as PagingLevel
&&& r.unwrap().0.inner.va < r.unwrap().0.inner.barrier_va.end
&&& r.unwrap().0.inner.va == va.start
&&& r.unwrap().0.inner.barrier_va == *va
},
forall |item: C::Item| {
CursorMut::<'rcu, C, G>::item_not_mapped(item, *old(regions))
==> CursorMut::<'rcu, C, G>::item_not_mapped(item, *final(regions))
},
forall |idx: usize| {
(*final(regions)).slot_owners[idx].paths_in_pt
== (*old(regions)).slot_owners[idx].paths_in_pt
},
Create a new cursor exclusively accessing the virtual address range for mapping.
If another cursor is already accessing the range, the new cursor may wait until the previous cursor is dropped.
Source
pub exec fn cursor<'rcu, G: InAtomicMode>(
&'rcu self,
guard: &'rcu G,
va: &Range<Vaddr>,
) -> r : Result<(Cursor<'rcu, C, G>, Tracked<CursorOwner<'rcu, C>>), PageTableError>
pub exec fn cursor<'rcu, G: InAtomicMode>( &'rcu self, guard: &'rcu G, va: &Range<Vaddr>, ) -> r : Result<(Cursor<'rcu, C, G>, Tracked<CursorOwner<'rcu, C>>), PageTableError>
Tracked(owner): Tracked<PageTableOwner<C>>,
Tracked(guard_perm): Tracked<GuardPerm<'rcu, C>>,
Tracked(regions): Tracked<&mut MetaRegionOwners>,
Tracked(guards): Tracked<&mut Guards<'rcu, C>>,
requires
owner.inv(),
ensures
Cursor::<C, G>::cursor_new_success_conditions(va)
==> {
&&& r is Ok
&&& r.unwrap().0.invariants(*r.unwrap().1, *final(regions), *final(guards))
&&& r.unwrap().1.in_locked_range()
&&& r.unwrap().0.level < r.unwrap().0.guard_level
&&& r.unwrap().0.va < r.unwrap().0.barrier_va.end
&&& r.unwrap().0.va == va.start
&&& r.unwrap().0.barrier_va == *va
&&& r.unwrap().1@.as_page_table_owner() == owner
&&& r.unwrap().1@.continuations[3].path() == owner.0.value.path
},
!Cursor::<C, G>::cursor_new_success_conditions(va) ==> r is Err,
forall |idx: usize| {
old(regions).slot_owners[idx].inner_perms.ref_count.value()
!= crate::specs::mm::frame::meta_owners::REF_COUNT_UNUSED
==> final(regions).slot_owners[idx].paths_in_pt
== old(regions).slot_owners[idx].paths_in_pt
},
(forall |i: usize| {
old(regions).slot_owners.contains_key(i)
&& old(regions).slot_owners[i].inner_perms.ref_count.value()
!= crate::specs::mm::frame::meta_owners::REF_COUNT_UNUSED
==> old(regions).slot_owners[i].inner_perms.ref_count.value() + 1
< crate::specs::mm::frame::meta_owners::REF_COUNT_MAX
})
==> (forall |i: usize| (
final(regions).slot_owners.contains_key(i)
&& final(regions).slot_owners[i].inner_perms.ref_count.value()
!= crate::specs::mm::frame::meta_owners::REF_COUNT_UNUSED
==> final(regions).slot_owners[i].inner_perms.ref_count.value() + 1
< crate::specs::mm::frame::meta_owners::REF_COUNT_MAX
)),
Create a new cursor exclusively accessing the virtual address range for querying.
If another cursor is already accessing the range, the new cursor may wait until the previous cursor is dropped. The modification to the mapping by the cursor may also block or be overridden by the mapping of another cursor.