pub struct EntryOwner<C: PageTableConfig> {
pub node: Option<NodeOwner<C>>,
pub frame: Option<FrameEntryOwner>,
pub locked: Option<Ghost<Seq<FrameView<C>>>>,
pub absent: bool,
pub in_scope: bool,
pub path: TreePath<NR_ENTRIES>,
pub parent_level: PagingLevel,
}

Fields§

§node: Option<NodeOwner<C>>
§frame: Option<FrameEntryOwner>
§locked: Option<Ghost<Seq<FrameView<C>>>>
§absent: bool
§in_scope: bool
§path: TreePath<NR_ENTRIES>
§parent_level: PagingLevel

Implementations§
Source§impl<C: PageTableConfig> EntryOwner<C>
impl<C: PageTableConfig> EntryOwner<C>
Sourcepub open spec fn from_pte_regions_spec(
self,
regions: MetaRegionOwners,
) -> MetaRegionOwners
pub open spec fn from_pte_regions_spec( self, regions: MetaRegionOwners, ) -> MetaRegionOwners
{
if self.is_node() {
let index = frame_to_index(self.meta_slot_paddr().unwrap());
let old_slot = regions.slot_owners[index];
let new_slot = MetaSlotOwner {
raw_count: 0usize,
..old_slot
};
MetaRegionOwners {
slots: regions.slots.insert(index, self.node.unwrap().meta_perm.points_to),
slot_owners: regions.slot_owners.insert(index, new_slot),
}
} else {
regions
}
}Sourcepub open spec fn into_pte_regions_spec(
self,
regions: MetaRegionOwners,
) -> MetaRegionOwners
pub open spec fn into_pte_regions_spec( self, regions: MetaRegionOwners, ) -> MetaRegionOwners
{
if self.is_node() {
let index = frame_to_index(self.meta_slot_paddr().unwrap());
let old_slot = regions.slot_owners[index];
let new_slot = MetaSlotOwner {
raw_count: (old_slot.raw_count + 1) as usize,
..old_slot
};
MetaRegionOwners {
slots: regions.slots,
slot_owners: regions.slot_owners.insert(index, new_slot),
..regions
}
} else {
regions
}
}Sourcepub open spec fn into_pte_owner_spec(self) -> EntryOwner<C>
pub open spec fn into_pte_owner_spec(self) -> EntryOwner<C>
{
EntryOwner {
in_scope: false,
..self
}
}Sourcepub open spec fn from_pte_owner_spec(self) -> EntryOwner<C>
pub open spec fn from_pte_owner_spec(self) -> EntryOwner<C>
{
EntryOwner {
in_scope: true,
..self
}
}Sourcepub open spec fn pte_invariants(self, pte: C::E, regions: MetaRegionOwners) -> bool
pub open spec fn pte_invariants(self, pte: C::E, regions: MetaRegionOwners) -> bool
{
&&& self.inv()
&&& regions.inv()
&&& self.match_pte(pte, self.parent_level)
&&& self.metaregion_sound(regions)
&&& !self.in_scope
}

This is equivalent to the other invariant relations, combining the inv predicates for each
object and the well-formedness relations between them.
Source§impl<C: PageTableConfig> EntryOwner<C>
impl<C: PageTableConfig> EntryOwner<C>
Sourcepub open spec fn new_absent_spec(
path: TreePath<NR_ENTRIES>,
parent_level: PagingLevel,
) -> Self
pub open spec fn new_absent_spec( path: TreePath<NR_ENTRIES>, parent_level: PagingLevel, ) -> Self
{
EntryOwner {
node: None,
frame: None,
locked: None,
absent: true,
in_scope: true,
path,
parent_level,
}
}Sourcepub open spec fn new_frame_spec(
paddr: Paddr,
path: TreePath<NR_ENTRIES>,
parent_level: PagingLevel,
prop: PageProperty,
) -> Self
pub open spec fn new_frame_spec( paddr: Paddr, path: TreePath<NR_ENTRIES>, parent_level: PagingLevel, prop: PageProperty, ) -> Self
{
EntryOwner {
node: None,
frame: Some(FrameEntryOwner {
mapped_pa: paddr,
size: page_size(parent_level),
prop,
}),
locked: None,
absent: false,
in_scope: true,
path,
parent_level,
}
}Sourcepub open spec fn new_node_spec(node: NodeOwner<C>, path: TreePath<NR_ENTRIES>) -> Self
pub open spec fn new_node_spec(node: NodeOwner<C>, path: TreePath<NR_ENTRIES>) -> Self
{
EntryOwner {
node: Some(node),
frame: None,
locked: None,
absent: false,
in_scope: true,
path,
parent_level: (node.level + 1) as PagingLevel,
}
}Sourcepub proof fn new_absent(path: TreePath<NR_ENTRIES>, parent_level: PagingLevel) -> tracked Self
pub proof fn new_absent(path: TreePath<NR_ENTRIES>, parent_level: PagingLevel) -> tracked Self
ensures
    Self::new_absent_spec(path, parent_level),

Source§

pub proof fn new_frame(
paddr: Paddr,
path: TreePath<NR_ENTRIES>,
parent_level: PagingLevel,
prop: PageProperty,
) -> tracked Self
pub proof fn new_frame( paddr: Paddr, path: TreePath<NR_ENTRIES>, parent_level: PagingLevel, prop: PageProperty, ) -> tracked Self
ensures
    Self::new_frame_spec(paddr, path, parent_level, prop),

Source§

pub proof fn new_node(node: NodeOwner<C>, path: TreePath<NR_ENTRIES>) -> tracked Self
pub proof fn new_node(node: NodeOwner<C>, path: TreePath<NR_ENTRIES>) -> tracked Self
ensures
    Self::new_node_spec(node, path),

Source§

pub proof fn new_untracked_frame(
paddr: Paddr,
parent_level: PagingLevel,
prop: PageProperty,
) -> tracked res : Self
pub proof fn new_untracked_frame( paddr: Paddr, parent_level: PagingLevel, prop: PageProperty, ) -> tracked res : Self
requires
    paddr % PAGE_SIZE == 0,
    paddr < MAX_PADDR,
    1 <= parent_level,
    parent_level <= NR_LEVELS,
ensures
    res.is_frame(),
    res.frame.unwrap().mapped_pa == paddr,
    res.frame.unwrap().prop == prop,
    res.frame.unwrap().size == page_size(parent_level),
    res.parent_level == parent_level,
    res.path.inv(),
    res.in_scope,
    res.inv_base(),
    crate::mm::page_table::Child::<C>::Frame(paddr, parent_level, prop).wf(res),

Creates a ghost entry owner for mapping an untracked (device memory) frame.
Unlike new_frame, this does not consume a slot permission from the meta region,
since device memory PAs are outside the tracked frame allocator.
The actual mapping correctness is guaranteed by the caller’s unsafe contract.
The requires clauses reflect properties guaranteed by collect_largest_pages postconditions,
so this axiom is only ever called with values that satisfy them.
Sourcepub open spec fn match_pte(self, pte: C::E, parent_level: PagingLevel) -> bool
pub open spec fn match_pte(self, pte: C::E, parent_level: PagingLevel) -> bool
{
&&& pte.paddr() % PAGE_SIZE == 0
&&& pte.paddr() < MAX_PADDR
&&& !pte.is_present()
==> {
&&& self.is_absent()
&&& parent_level > 1 ==> !pte.is_last(parent_level)
}
&&& pte.is_present() && !pte.is_last(parent_level)
==> {
&&& self.is_node()
&&& meta_to_frame(self.node.unwrap().meta_perm.addr()) == pte.paddr()
}
&&& pte.is_present() && pte.is_last(parent_level)
==> {
&&& self.is_frame()
&&& self.frame.unwrap().mapped_pa == pte.paddr()
&&& self.frame.unwrap().prop == pte.prop()
}
}Sourcepub proof fn absent_match_pte(owner: Self, pte: C::E, parent_level: PagingLevel)
pub proof fn absent_match_pte(owner: Self, pte: C::E, parent_level: PagingLevel)
requires
    owner.is_absent(),
    pte == C::E::new_absent_spec(),
    pte.paddr() % PAGE_SIZE == 0,
    pte.paddr() < MAX_PADDR,
ensures
    owner.match_pte(pte, parent_level),

When owner is absent and pte is the absent PTE with valid paddr, match_pte holds.
Sourcepub proof fn last_pte_implies_frame_match(self, pte: C::E, parent_level: PagingLevel)
pub proof fn last_pte_implies_frame_match(self, pte: C::E, parent_level: PagingLevel)
requires
    self.inv(),
    self.match_pte(pte, parent_level),
    1 < parent_level,
    pte.is_last(parent_level),
ensures
    self.is_frame(),
    self.frame.unwrap().mapped_pa == pte.paddr(),
    self.frame.unwrap().prop == pte.prop(),

Source§

pub proof fn huge_frame_split_child_at(self, regions: MetaRegionOwners, idx: usize)
pub proof fn huge_frame_split_child_at(self, regions: MetaRegionOwners, idx: usize)
requires
    self.inv(),
    self.is_frame(),
    regions.inv(),
    1 < self.parent_level < NR_LEVELS,
    idx < NR_ENTRIES,
ensures
    self.frame.unwrap().mapped_pa + idx * page_size((self.parent_level - 1) as PagingLevel)
        < MAX_PADDR,
    ((self.frame.unwrap().mapped_pa
        + idx * page_size((self.parent_level - 1) as PagingLevel)) as Paddr)
        % page_size((self.parent_level - 1) as PagingLevel) == 0,
    ((self.frame.unwrap().mapped_pa
        + idx * page_size((self.parent_level - 1) as PagingLevel)) as Paddr)
        + page_size((self.parent_level - 1) as PagingLevel) <= MAX_PADDR,
    ((self.frame.unwrap().mapped_pa
        + idx * page_size((self.parent_level - 1) as PagingLevel)) as Paddr) % PAGE_SIZE == 0,

Source§

pub open spec fn expected_raw_count(self) -> usize
pub open spec fn expected_raw_count(self) -> usize
{ if self.in_scope { 0 } else { 1 } }Sourcepub proof fn frame_sub_pages_valid_preserved_at_own_slot(
self,
r0: MetaRegionOwners,
r1: MetaRegionOwners,
)
pub proof fn frame_sub_pages_valid_preserved_at_own_slot( self, r0: MetaRegionOwners, r1: MetaRegionOwners, )
requires
    self.inv(),
    r0.inv(),
    self.is_frame(),
    self.parent_level <= NR_LEVELS,
    self.frame_sub_pages_valid(r0),
    r0.slots == r1.slots,
    r0.slot_owners.dom() =~= r1.slot_owners.dom(),
    forall |i: usize| {
        i != frame_to_index(self.meta_slot_paddr().unwrap())
        && r0.slot_owners.contains_key(i) ==> r0.slot_owners[i] == r1.slot_owners[i]
    },
ensures
    self.frame_sub_pages_valid(r1),

Helper: sub-page validity is preserved when the only slot that changed is the frame’s own
slot (and the slots map and other slot owners are unchanged).
Sourcepub open spec fn frame_sub_pages_valid(self, regions: MetaRegionOwners) -> bool
pub open spec fn frame_sub_pages_valid(self, regions: MetaRegionOwners) -> bool
{
self.is_frame() && self.parent_level > 1
==> {
let pa = self.frame.unwrap().mapped_pa;
let nr_pages = page_size(self.parent_level) / PAGE_SIZE;
forall |j: usize| {
0 < j < nr_pages
==> {
let sub_idx = frame_to_index((pa + j * PAGE_SIZE) as usize);
&&& regions.slots.contains_key(sub_idx)
&&& regions.slot_owners[sub_idx].inner_perms.ref_count.value()
!= REF_COUNT_UNUSED
&&& regions.slot_owners[sub_idx].inner_perms.ref_count.value()
> 0
}
}
}
}Sub-page slot validity for huge frames (fine-grained: all 4KB pages within).
When a frame at this entry has parent_level > 1, it is a huge page covering
page_size(parent_level) bytes. Every 4KB sub-page within this range (excluding
the j = 0 case which coincides with the frame’s own slot) must be allocated
(in the free pool) with rc != UNUSED.
The fine-grained form (over j * PAGE_SIZE rather than j * page_size(L-1)) is
what enables the recursive split case in split_if_mapped_huge: when splitting a
1GB frame into 2MB sub-frames, each 2MB sub-frame’s own sub-page validity (over
its 511 4KB sub-sub-pages) follows from the 1GB frame’s fine-grained validity over
the corresponding subrange of indices.
Sourcepub open spec fn metaregion_sound(self, regions: MetaRegionOwners) -> bool
pub open spec fn metaregion_sound(self, regions: MetaRegionOwners) -> bool
{
if self.is_node() {
let idx = frame_to_index(self.meta_slot_paddr().unwrap());
&&& regions.slot_owners[idx].inner_perms.ref_count.value() != REF_COUNT_UNUSED
&&& regions.slot_owners[idx].raw_count == self.expected_raw_count()
&&& regions.slot_owners[idx].self_addr == self.node.unwrap().meta_perm.addr()
&&& self.node.unwrap().meta_perm.points_to.value().wf(regions.slot_owners[idx])
&&& regions.slot_owners[idx].paths_in_pt == set![self.path]
} else if self.is_frame() {
let idx = frame_to_index(self.meta_slot_paddr().unwrap());
&&& regions.slots.contains_key(idx)
&&& regions.slots[idx].addr() == meta_addr(idx)
&&& regions.slots[idx].is_init()
&&& regions.slots[idx].value().wf(regions.slot_owners[idx])
&&& regions.slot_owners[idx].inner_perms.ref_count.value() != REF_COUNT_UNUSED
&&& regions.slot_owners[idx].inner_perms.ref_count.value() > 0
&&& regions.slot_owners[idx].paths_in_pt.contains(self.path)
&&& self.frame_sub_pages_valid(regions)
} else {
true
}
}Sourcepub proof fn active_entry_not_in_free_pool(
entry: Self,
regions: MetaRegionOwners,
free_idx: usize,
)
pub proof fn active_entry_not_in_free_pool( entry: Self, regions: MetaRegionOwners, free_idx: usize, )
requires
    regions.inv(),
    entry.inv(),
    entry.is_node(),
    entry.metaregion_sound(regions),
    regions.slots.contains_key(free_idx),
ensures
    frame_to_index(entry.meta_slot_paddr().unwrap()) != free_idx,

PointsTo uniqueness: if meta slot free_idx is in the free pool (regions.slots),
no active page table NODE entry can own a PointsTo at the same slot address.
Justified by Verus’s linear ownership of PointsTo<MetaSlot>:
the node’s PointsTo is either in regions.slots OR held by the node, never both.
(Frames no longer own their PointsTo — they read from regions.slots directly.)
Sourcepub open spec fn meta_slot_paddr(self) -> Option<Paddr>
pub open spec fn meta_slot_paddr(self) -> Option<Paddr>
{
if self.is_node() {
Some(meta_to_frame(self.node.unwrap().meta_perm.addr()))
} else if self.is_frame() {
Some(self.frame.unwrap().mapped_pa)
} else {
None
}
}Sourcepub open spec fn meta_slot_paddr_neq(self, other: Self) -> bool
pub open spec fn meta_slot_paddr_neq(self, other: Self) -> bool
{
self.meta_slot_paddr() is Some
==> (other.meta_slot_paddr() is Some
==> self.meta_slot_paddr().unwrap() != other.meta_slot_paddr().unwrap())
}Sourcepub proof fn metaregion_sound_slot_owners_only(
self,
r0: MetaRegionOwners,
r1: MetaRegionOwners,
)
pub proof fn metaregion_sound_slot_owners_only( self, r0: MetaRegionOwners, r1: MetaRegionOwners, )
requires
    self.metaregion_sound(r0),
    r0.slot_owners == r1.slot_owners,
    forall |k: usize| r0.slots.contains_key(k) ==> #[trigger] r1.slots.contains_key(k),
    forall |k: usize| r0.slots.contains_key(k) ==> r0.slots[k] == #[trigger] r1.slots[k],
ensures
    self.metaregion_sound(r1),

metaregion_sound transfers when slot_owners matches and slots is a superset.
For nodes: only slot_owners matters. For frames: slots.contains_key and slots[idx]
must be preserved, which holds when slots is a superset with values unchanged.
Sourcepub proof fn metaregion_sound_one_slot_changed(
self,
r0: MetaRegionOwners,
r1: MetaRegionOwners,
changed_idx: usize,
)
pub proof fn metaregion_sound_one_slot_changed( self, r0: MetaRegionOwners, r1: MetaRegionOwners, changed_idx: usize, )
requires
    self.metaregion_sound(r0),
    forall |i: usize| i != changed_idx ==> r0.slot_owners[i] == r1.slot_owners[i],
    r0.slot_owners.dom() =~= r1.slot_owners.dom(),
    forall |k: usize| r0.slots.contains_key(k) ==> #[trigger] r1.slots.contains_key(k),
    forall |k: usize| r0.slots.contains_key(k) ==> r0.slots[k] == #[trigger] r1.slots[k],
    self.meta_slot_paddr() is Some
        ==> frame_to_index(self.meta_slot_paddr().unwrap()) != changed_idx,
    self.is_frame() && self.parent_level > 1
        ==> {
            let pa = self.frame.unwrap().mapped_pa;
            let nr_pages = page_size(self.parent_level) / PAGE_SIZE;
            forall |j: usize| {
                0 < j < nr_pages
                ==> {
                    let sub_idx = #[trigger]
                    frame_to_index((pa + j * PAGE_SIZE) as usize);
                    sub_idx != changed_idx
                    || (r1.slots.contains_key(sub_idx)
                        && r1.slot_owners[sub_idx].inner_perms.ref_count.value()
                            != REF_COUNT_UNUSED
                        && r1.slot_owners[sub_idx].inner_perms.ref_count.value() > 0)
                }
            }
        },
ensures
    self.metaregion_sound(r1),

If metaregion_sound(r0) holds and r1 differs from r0 only at one slot index
that this entry does not reference, then metaregion_sound(r1) also holds.
Sourcepub proof fn metaregion_sound_paths_in_pt_changed(
self,
r0: MetaRegionOwners,
r1: MetaRegionOwners,
changed_idx: usize,
)
pub proof fn metaregion_sound_paths_in_pt_changed( self, r0: MetaRegionOwners, r1: MetaRegionOwners, changed_idx: usize, )
requires
    self.inv(),
    r0.inv(),
    self.metaregion_sound(r0),
    r0.slots == r1.slots,
    r0.slot_owners.dom() =~= r1.slot_owners.dom(),
    forall |i: usize| i != changed_idx ==> r0.slot_owners[i] == r1.slot_owners[i],
    r1.slot_owners[changed_idx].inner_perms == r0.slot_owners[changed_idx].inner_perms,
    r1.slot_owners[changed_idx].self_addr == r0.slot_owners[changed_idx].self_addr,
    r1.slot_owners[changed_idx].raw_count == r0.slot_owners[changed_idx].raw_count,
    r1.slot_owners[changed_idx].usage == r0.slot_owners[changed_idx].usage,
    self.is_node() && self.meta_slot_paddr() is Some
        && frame_to_index(self.meta_slot_paddr().unwrap()) == changed_idx
        ==> r1.slot_owners[changed_idx].paths_in_pt == set![self.path],
    self.is_frame() && self.meta_slot_paddr() is Some
        && frame_to_index(self.meta_slot_paddr().unwrap()) == changed_idx
        ==> r1.slot_owners[changed_idx].paths_in_pt.contains(self.path),
    self.is_frame() && self.parent_level > 1
        ==> {
            let pa = self.frame.unwrap().mapped_pa;
            let sub_level = (self.parent_level - 1) as PagingLevel;
            forall |j: usize| {
                0 < j < NR_ENTRIES
                ==> {
                    let sub_idx = #[trigger]
                    frame_to_index((pa + j * page_size(sub_level)) as usize);
                    sub_idx != changed_idx
                    || r1.slot_owners[changed_idx].paths_in_pt.is_empty()
                }
            }
        },
ensures
    self.metaregion_sound(r1),

metaregion_sound is preserved when only paths_in_pt changes at a slot,
slots is unchanged, and the new paths_in_pt is correct for any node at that index.
Sourcepub proof fn same_paddr_implies_same_path(
self,
other: Self,
regions: MetaRegionOwners,
)
pub proof fn same_paddr_implies_same_path( self, other: Self, regions: MetaRegionOwners, )
requires
    self.meta_slot_paddr() is Some,
    self.meta_slot_paddr() == other.meta_slot_paddr(),
    regions.slot_owners[frame_to_index(self.meta_slot_paddr().unwrap())].paths_in_pt
        == set![self.path],
    regions.slot_owners[frame_to_index(self.meta_slot_paddr().unwrap())].paths_in_pt
        == set![other.path],
ensures
    self.path == other.path,

Two entries with the same physical address whose paths_in_pt matches their
respective paths must have the same path.
Sourcepub proof fn metaregion_sound_rc_value_changed(
self,
r0: MetaRegionOwners,
r1: MetaRegionOwners,
)
pub proof fn metaregion_sound_rc_value_changed( self, r0: MetaRegionOwners, r1: MetaRegionOwners, )
requires
    self.inv(),
    r0.inv(),
    self.metaregion_sound(r0),
    self.meta_slot_paddr() is Some,
    r0.slots == r1.slots,
    ({
        let idx = frame_to_index(self.meta_slot_paddr().unwrap());
        &&& r1.slot_owners[idx].inner_perms.ref_count.id()
            == r0.slot_owners[idx].inner_perms.ref_count.id()
        &&& r1.slot_owners[idx].inner_perms.ref_count.value()
            != crate::specs::mm::frame::meta_owners::REF_COUNT_UNUSED
        &&& r1.slot_owners[idx].inner_perms.ref_count.value() > 0
        &&& r1.slot_owners[idx].inner_perms.storage
            == r0.slot_owners[idx].inner_perms.storage
        &&& r1.slot_owners[idx].inner_perms.vtable_ptr
            == r0.slot_owners[idx].inner_perms.vtable_ptr
        &&& r1.slot_owners[idx].inner_perms.in_list
            == r0.slot_owners[idx].inner_perms.in_list
        &&& r1.slot_owners[idx].self_addr == r0.slot_owners[idx].self_addr
        &&& r1.slot_owners[idx].raw_count == r0.slot_owners[idx].raw_count
        &&& r1.slot_owners[idx].paths_in_pt == r0.slot_owners[idx].paths_in_pt
    }),
    forall |i: usize| {
        i != frame_to_index(self.meta_slot_paddr().unwrap())
        && r0.slot_owners.contains_key(i) ==> r0.slot_owners[i] == r1.slot_owners[i]
    },
ensures
    self.metaregion_sound(r1),

metaregion_sound is preserved when only ref_count.value() changes at this entry’s slot
and slots is unchanged.
Sourcepub proof fn nodes_different_paths_different_addrs(
self,
other: Self,
regions: MetaRegionOwners,
)
pub proof fn nodes_different_paths_different_addrs( self, other: Self, regions: MetaRegionOwners, )
requires
    self.is_node(),
    other.is_node(),
    self.meta_slot_paddr() is Some
        ==> regions.slot_owners[frame_to_index(self.meta_slot_paddr().unwrap())].paths_in_pt
            == set![self.path],
    other.meta_slot_paddr() is Some
        ==> regions.slot_owners[frame_to_index(other.meta_slot_paddr().unwrap())].paths_in_pt
            == set![other.path],
    self.path != other.path,
ensures
    self.node.unwrap().meta_perm.addr() != other.node.unwrap().meta_perm.addr(),

Two nodes whose paths_in_pt matches their paths have different addresses
if they have different paths.
Sourcepub proof fn nodes_different_path_lengths_neq_slot(
self,
other: Self,
regions: MetaRegionOwners,
)
pub proof fn nodes_different_path_lengths_neq_slot( self, other: Self, regions: MetaRegionOwners, )
requires
    self.is_node(),
    other.is_node(),
    self.metaregion_sound(regions),
    other.metaregion_sound(regions),
    self.path.len() != other.path.len(),
ensures
    self.meta_slot_paddr_neq(other),

Two node entries with metaregion_sound under the same regions cannot share
a meta slot paddr if their paths have different lengths.
For nodes, metaregion_sound requires paths_in_pt == set![path] (singleton).
Equal slot indices would force equal singleton sets, hence equal paths —
contradicting the length difference.
Source§impl<C: PageTableConfig> EntryOwner<C>
impl<C: PageTableConfig> EntryOwner<C>
Sourcepub open spec fn inv_base(self) -> bool
pub open spec fn inv_base(self) -> bool
{
&&& self.node is Some
==> {
&&& self.frame is None
&&& self.locked is None
&&& self.node.unwrap().inv()
&&& !self.absent
&&& self.parent_level == self.node.unwrap().level + 1
}
&&& self.frame is Some
==> {
&&& self.node is None
&&& self.locked is None
&&& !self.absent
&&& 1 <= self.parent_level < NR_LEVELS
&&& self.frame.unwrap().mapped_pa % PAGE_SIZE == 0
&&& self.frame.unwrap().mapped_pa < MAX_PADDR
&&& self.frame.unwrap().size == page_size(self.parent_level)
&&& self.frame.unwrap().mapped_pa % page_size(self.parent_level) == 0
&&& self.frame.unwrap().mapped_pa + page_size(self.parent_level) <= MAX_PADDR
}
&&& self.locked is Some
==> {
&&& self.frame is None
&&& self.node is None
&&& !self.absent
}
&&& self.path.inv()
}Structural invariant without !in_scope. Used by Child::invariants
for entries that have been taken out of the tree (in_scope == true).
Sourcepub open spec fn set_in_scope(self, in_scope: bool) -> Self
pub open spec fn set_in_scope(self, in_scope: bool) -> Self
{ EntryOwner { in_scope, ..self } }Trait Implementations§
Source§impl<C: PageTableConfig> Inv for EntryOwner<C>
impl<C: PageTableConfig> Inv for EntryOwner<C>
Source§impl<C: PageTableConfig> InvView for EntryOwner<C>
impl<C: PageTableConfig> InvView for EntryOwner<C>
Source§proof fn view_preserves_inv(self)
proof fn view_preserves_inv(self)
Source§impl<C: PageTableConfig, const L: usize> TreeNodeValue<L> for EntryOwner<C>
impl<C: PageTableConfig, const L: usize> TreeNodeValue<L> for EntryOwner<C>
Source§open spec fn default(lv: nat) -> Self
open spec fn default(lv: nat) -> Self
{
Self {
in_scope: false,
path: TreePath::new(Seq::empty()),
parent_level: (INC_LEVELS - lv) as PagingLevel,
node: None,
frame: None,
locked: None,
absent: true,
}
}Source§proof fn default_preserves_inv()
proof fn default_preserves_inv()
Source§proof fn default_preserves_la_inv()
proof fn default_preserves_la_inv()
Source§open spec fn rel_children(self, i: int, child: Option<Self>) -> bool
open spec fn rel_children(self, i: int, child: Option<Self>) -> bool
{ true }Source§proof fn default_preserves_rel_children(self, lv: nat)
proof fn default_preserves_rel_children(self, lv: nat)
Source§impl<C: PageTableConfig> View for EntryOwner<C>
impl<C: PageTableConfig> View for EntryOwner<C>
Source§open spec fn view(&self) -> <Self as View>::V
open spec fn view(&self) -> <Self as View>::V
{
if let Some(frame) = self.frame {
EntryView::Leaf {
leaf: LeafPageTableEntryView {
map_va: vaddr(self.path) as int,
map_to_pa: frame.mapped_pa as int,
level: self.path.len() as u8,
prop: frame.prop,
phantom: PhantomData,
},
}
} else if let Some(node) = self.node {
EntryView::Intermediate {
node: IntermediatePageTableEntryView {
map_va: vaddr(self.path) as int,
map_to_pa: meta_to_frame(node.meta_perm.addr()) as int,
level: self.path.len() as u8,
phantom: PhantomData,
},
}
} else if let Some(view) = self.locked {
EntryView::LockedSubtree {
views: view@,
}
} else {
EntryView::Absent
}
}