pub struct CursorOwner<'rcu, C: PageTableConfig> {
pub level: PagingLevel,
pub continuations: Map<int, CursorContinuation<'rcu, C>>,
pub va: AbstractVaddr,
pub guard_level: PagingLevel,
pub prefix: AbstractVaddr,
pub popped_too_high: bool,
}

Fields:
- level: PagingLevel
- continuations: Map<int, CursorContinuation<'rcu, C>>
- va: AbstractVaddr
- guard_level: PagingLevel
- prefix: AbstractVaddr
- popped_too_high: bool

Implementations:
Source§impl<'rcu, C: PageTableConfig> CursorOwner<'rcu, C>
impl<'rcu, C: PageTableConfig> CursorOwner<'rcu, C>
Sourcepub open spec fn max_steps_subtree(level: usize) -> usize
pub open spec fn max_steps_subtree(level: usize) -> usize
{
if level <= 1 {
NR_ENTRIES
} else {
(NR_ENTRIES * (Self::max_steps_subtree((level - 1) as usize) + 1)) as usize
}
}

The number of steps it will take to walk through every node of a full
page table at level `level`.
Sourcepub open spec fn max_steps_partial(self, level: usize) -> usize
pub open spec fn max_steps_partial(self, level: usize) -> usize
{
if level == NR_LEVELS {
0
} else {
let cont = self.continuations[(level - 1) as int];
let steps = Self::max_steps_subtree(level) * (NR_ENTRIES - cont.idx);
let remaining_steps = self.max_steps_partial((level + 1) as usize);
(steps + remaining_steps) as usize
}
}

The number of steps it will take to walk through the remaining entries of
the page table, starting at the given level.
Sourcepub proof fn max_steps_partial_inv(self, other: Self, level: usize)
pub proof fn max_steps_partial_inv(self, other: Self, level: usize)
requires:
    self.inv(),
    other.inv(),
    self.level == other.level,
    self.level <= level <= NR_LEVELS,
    forall |i: int| {
        self.level - 1 <= i < NR_LEVELS
            ==> self.continuations[i].idx == other.continuations[i].idx
    },
ensures:
    self.max_steps_partial(level) == other.max_steps_partial(level),

Source pub open spec fn push_level_owner_spec(self, guard_perm: GuardPerm<'rcu, C>) -> Self
pub open spec fn push_level_owner_spec(self, guard_perm: GuardPerm<'rcu, C>) -> Self
{
let cont = self.continuations[self.level - 1];
let (child, cont) = cont
.make_cont_spec(self.va.index[self.level - 2] as usize, guard_perm);
let new_continuations = self.continuations.insert(self.level - 1, cont);
let new_continuations = new_continuations.insert(self.level - 2, child);
let new_level = (self.level - 1) as u8;
Self {
continuations: new_continuations,
level: new_level,
popped_too_high: false,
..self
}
}Sourcepub proof fn push_level_owner_decreases_steps(self, guard_perm: GuardPerm<'rcu, C>)
pub proof fn push_level_owner_decreases_steps(self, guard_perm: GuardPerm<'rcu, C>)
requires:
    self.inv(),
    self.level > 0,
ensures:
    self.push_level_owner_spec(guard_perm).max_steps() < self.max_steps(),

Source pub proof fn push_level_owner_preserves_va(self, guard_perm: GuardPerm<'rcu, C>)
pub proof fn push_level_owner_preserves_va(self, guard_perm: GuardPerm<'rcu, C>)
self.inv(),self.level > 1,ensuresself.push_level_owner_spec(guard_perm).va == self.va,self.push_level_owner_spec(guard_perm).continuations[self.level - 2].idx
== self.va.index[self.level - 2],Sourcepub proof fn push_level_owner_preserves_mappings(self, guard_perm: GuardPerm<'rcu, C>)
pub proof fn push_level_owner_preserves_mappings(self, guard_perm: GuardPerm<'rcu, C>)
self.inv(),self.level > 1,ensuresself.push_level_owner_spec(guard_perm)@.mappings == self@.mappings,Sourcepub proof fn push_level_owner_preserves_inv(self, guard_perm: GuardPerm<'rcu, C>)
pub proof fn push_level_owner_preserves_inv(self, guard_perm: GuardPerm<'rcu, C>)
self.inv(),self.level > 1,forall |i: int| {
self.level - 1 <= i < NR_LEVELS
==> self.continuations[i].guard_perm.addr() != guard_perm.addr()
},ensuresself.push_level_owner_spec(guard_perm).inv(),Sourcepub proof fn push_level_owner_preserves_invs(
self,
guard_perm: GuardPerm<'rcu, C>,
regions: MetaRegionOwners,
guards: Guards<'rcu, C>,
)
pub proof fn push_level_owner_preserves_invs( self, guard_perm: GuardPerm<'rcu, C>, regions: MetaRegionOwners, guards: Guards<'rcu, C>, )
self.inv(),self.level > 1,self.only_current_locked(guards),self.nodes_locked(guards),self.relate_region(regions),guards.lock_held(guard_perm.value().inner.inner@.ptr.addr()),ensuresself.push_level_owner_spec(guard_perm).inv(),self.push_level_owner_spec(guard_perm).children_not_locked(guards),self.push_level_owner_spec(guard_perm).nodes_locked(guards),self.push_level_owner_spec(guard_perm).relate_region(regions),Sourcepub proof fn push_level_owner(tracked &mut self, tracked guard_perm: Tracked<GuardPerm<'rcu, C>>)
pub proof fn push_level_owner(tracked &mut self, tracked guard_perm: Tracked<GuardPerm<'rcu, C>>)
old(self).inv(),old(self).level > 1,ensures*self == old(self).push_level_owner_spec(guard_perm@),Sourcepub open spec fn pop_level_owner_spec(self) -> (Self, GuardPerm<'rcu, C>)
pub open spec fn pop_level_owner_spec(self) -> (Self, GuardPerm<'rcu, C>)
{
let child = self.continuations[self.level - 1];
let cont = self.continuations[self.level as int];
let (new_cont, guard_perm) = cont.restore_spec(child);
let new_continuations = self.continuations.insert(self.level as int, new_cont);
let new_continuations = new_continuations.remove(self.level - 1);
let new_level = (self.level + 1) as u8;
let popped_too_high = if new_level >= self.guard_level { true } else { false };
(
Self {
continuations: new_continuations,
level: new_level,
popped_too_high: popped_too_high,
..self
},
guard_perm,
)
}Sourcepub proof fn pop_level_owner_preserves_inv(self)
pub proof fn pop_level_owner_preserves_inv(self)
self.inv(),self.level < NR_LEVELS,ensuresself.pop_level_owner_spec().0.inv(),Sourcepub proof fn pop_level_owner_preserves_invs(
self,
guards: Guards<'rcu, C>,
regions: MetaRegionOwners,
)
pub proof fn pop_level_owner_preserves_invs( self, guards: Guards<'rcu, C>, regions: MetaRegionOwners, )
self.inv(),self.level < NR_LEVELS,self.children_not_locked(guards),self.nodes_locked(guards),self.relate_region(regions),ensuresself.pop_level_owner_spec().0.inv(),self.pop_level_owner_spec().0.only_current_locked(guards),self.pop_level_owner_spec().0.nodes_locked(guards),self.pop_level_owner_spec().0.relate_region(regions),Sourcepub proof fn set_va_preserves_inv(self, new_va: AbstractVaddr)
pub proof fn set_va_preserves_inv(self, new_va: AbstractVaddr)
self.inv(),new_va.inv(),forall |i: int| self.level - 1 <= i < NR_LEVELS ==> new_va.index[i] == self.va.index[i],forall |i: int| {
self.guard_level - 1 <= i < NR_LEVELS ==> new_va.index[i] == self.prefix.index[i]
},ensuresself.set_va_spec(new_va).inv(),Update va to a new value that shares the same indices at levels >= self.level. This preserves invariants because:
- The new va satisfies va.inv()
- The indices at levels >= level match the continuation indices
- in_locked_range/above_locked_range depend on va but the preconditions ensure consistency
Sourcepub proof fn pop_level_owner(tracked &mut self) -> tracked guard_perm : GuardPerm<'rcu, C>
pub proof fn pop_level_owner(tracked &mut self) -> tracked guard_perm : GuardPerm<'rcu, C>
old(self).inv(),old(self).level < NR_LEVELS,ensures*self == old(self).pop_level_owner_spec().0,guard_perm == old(self).pop_level_owner_spec().1,Sourcepub open spec fn move_forward_owner_spec(self) -> Self
pub open spec fn move_forward_owner_spec(self) -> Self
requires:
    self.inv(),
    self.level < NR_LEVELS,
    self.in_locked_range(),
{
if self.index() + 1 < NR_ENTRIES {
self.inc_index().zero_below_level()
} else if self.level < NR_LEVELS {
self.pop_level_owner_spec().0.move_forward_owner_spec()
} else {
Self {
popped_too_high: false,
..self
}
}
}Sourcepub proof fn move_forward_increases_va(self)
pub proof fn move_forward_increases_va(self)
self.inv(),self.level <= NR_LEVELS,self.in_locked_range(),ensuresself.move_forward_owner_spec().va.to_vaddr() > self.va.to_vaddr(),Sourcepub proof fn move_forward_not_popped_too_high(self)
pub proof fn move_forward_not_popped_too_high(self)
self.inv(),self.level <= NR_LEVELS,self.in_locked_range(),ensures!self.move_forward_owner_spec().popped_too_high,Sourcepub proof fn move_forward_owner_decreases_steps(self)
pub proof fn move_forward_owner_decreases_steps(self)
self.inv(),self.level <= NR_LEVELS,ensuresself.move_forward_owner_spec().max_steps() < self.max_steps(),Sourcepub proof fn move_forward_va_is_align_up(self)
pub proof fn move_forward_va_is_align_up(self)
self.inv(),self.level <= NR_LEVELS,ensuresself.move_forward_owner_spec().va == self.va.align_up(self.level as int),Sourcepub proof fn pop_level_owner_preserves_mappings(self)
pub proof fn pop_level_owner_preserves_mappings(self)
self.inv(),self.level < NR_LEVELS,self.in_locked_range(),ensuresself.pop_level_owner_spec().0@.mappings == self@.mappings,After popping a level, the total view_mappings is preserved. The restored parent at index self.level absorbs the child’s mappings, and both are within the view_mappings range [self.level, NR_LEVELS).
Sourcepub proof fn move_forward_owner_preserves_mappings(self)
pub proof fn move_forward_owner_preserves_mappings(self)
self.inv(),self.in_locked_range(),ensuresself.move_forward_owner_spec()@.mappings == self@.mappings,Sourcepub proof fn move_forward_owner_preserves_in_locked_range(self)
pub proof fn move_forward_owner_preserves_in_locked_range(self)
self.inv(),self.level <= NR_LEVELS,self.in_locked_range(),ensuresself.move_forward_owner_spec().in_locked_range(),After move_forward_owner_spec, the cursor remains within the locked range.
Sourcepub proof fn move_forward_pop_loop_level_lt_guard(self, owner0: Self)
pub proof fn move_forward_pop_loop_level_lt_guard(self, owner0: Self)
self.inv(),self.in_locked_range(),self.level <= self.guard_level,owner0.inv(),owner0.in_locked_range(),self.move_forward_owner_spec() == owner0.move_forward_owner_spec(),!owner0.move_forward_owner_spec().popped_too_high,ensuresself.level < self.guard_level,After the pop loop in move_forward, the cursor level is strictly below guard_level.
The loop exits when level >= guard_level OR pte_index != 0. The level >= guard_level
case is impossible: reaching guard_level via pop would set popped_too_high = true, but
the loop invariant owner.move_forward_owner_spec() == owner0.move_forward_owner_spec()
combined with move_forward_not_popped_too_high gives
!owner.move_forward_owner_spec().popped_too_high, and move_forward_owner_spec()
on a popped_too_high state gives a different result — a contradiction.
Source§impl<'rcu, C: PageTableConfig> CursorOwner<'rcu, C>
impl<'rcu, C: PageTableConfig> CursorOwner<'rcu, C>
Sourcepub open spec fn node_unlocked(
guards: Guards<'rcu, C>,
) -> FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>
pub open spec fn node_unlocked( guards: Guards<'rcu, C>, ) -> FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>
{
|owner: EntryOwner<C>, path: TreePath<NR_ENTRIES>| {
owner.is_node() ==> guards.unlocked(owner.node.unwrap().meta_perm.addr())
}
}Sourcepub open spec fn node_unlocked_except(
guards: Guards<'rcu, C>,
addr: usize,
) -> FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>
pub open spec fn node_unlocked_except( guards: Guards<'rcu, C>, addr: usize, ) -> FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>
{
|owner: EntryOwner<C>, path: TreePath<NR_ENTRIES>| {
owner.is_node()
==> (owner.node.unwrap().meta_perm.addr() != addr
==> guards.unlocked(owner.node.unwrap().meta_perm.addr()))
}
}Sourcepub open spec fn map_full_tree(
self,
f: FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>,
) -> bool
pub open spec fn map_full_tree( self, f: FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>, ) -> bool
{
forall |i: int| {
self.level - 1 <= i < NR_LEVELS ==> { self.continuations[i].map_children(f) }
}
}Sourcepub open spec fn map_only_children(
self,
f: FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>,
) -> bool
pub open spec fn map_only_children( self, f: FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>, ) -> bool
{
forall |i: int| {
self.level - 1 <= i < NR_LEVELS ==> self.continuations[i].map_children(f)
}
}Sourcepub open spec fn children_not_locked(self, guards: Guards<'rcu, C>) -> bool
pub open spec fn children_not_locked(self, guards: Guards<'rcu, C>) -> bool
{ self.map_only_children(Self::node_unlocked(guards)) }Sourcepub open spec fn only_current_locked(self, guards: Guards<'rcu, C>) -> bool
pub open spec fn only_current_locked(self, guards: Guards<'rcu, C>) -> bool
{
self.map_only_children(
Self::node_unlocked_except(
guards,
self.cur_entry_owner().node.unwrap().meta_perm.addr(),
),
)
}Sourcepub proof fn never_drop_restores_children_not_locked(
self,
guard: PageTableGuard<'rcu, C>,
guards0: Guards<'rcu, C>,
guards1: Guards<'rcu, C>,
)
pub proof fn never_drop_restores_children_not_locked( self, guard: PageTableGuard<'rcu, C>, guards0: Guards<'rcu, C>, guards1: Guards<'rcu, C>, )
self.inv(),self.only_current_locked(guards0),<PageTableGuard<'rcu, C> as TrackDrop>::constructor_requires(guard, guards0),<PageTableGuard<'rcu, C> as TrackDrop>::constructor_ensures(guard, guards0, guards1),ensuresself.children_not_locked(guards1),Sourcepub proof fn never_drop_restores_nodes_locked(
self,
guard: PageTableGuard<'rcu, C>,
guards0: Guards<'rcu, C>,
guards1: Guards<'rcu, C>,
)
pub proof fn never_drop_restores_nodes_locked( self, guard: PageTableGuard<'rcu, C>, guards0: Guards<'rcu, C>, guards1: Guards<'rcu, C>, )
self.inv(),self.nodes_locked(guards0),<PageTableGuard<'rcu, C> as TrackDrop>::constructor_requires(guard, guards0),<PageTableGuard<'rcu, C> as TrackDrop>::constructor_ensures(guard, guards0, guards1),forall |i: int| {
self.level - 1 <= i < NR_LEVELS
==> self.continuations[i].guard_perm.value().inner.inner@.ptr.addr()
!= guard.inner.inner@.ptr.addr()
},ensuresself.nodes_locked(guards1),After dropping the guard for the popped level, nodes_locked is preserved
for the new (higher-level) owner, because the dropped guard’s address is not
among those checked by nodes_locked (which covers levels >= self.level - 1).
Sourcepub proof fn protect_preserves_cursor_inv_relate(
self,
other: Self,
regions: MetaRegionOwners,
)
pub proof fn protect_preserves_cursor_inv_relate( self, other: Self, regions: MetaRegionOwners, )
self.inv(),self.relate_region(regions),self.cur_entry_owner().is_frame(),other.cur_entry_owner().is_frame(),other.cur_entry_owner().inv(),other.cur_entry_owner().frame.unwrap().mapped_pa
== self.cur_entry_owner().frame.unwrap().mapped_pa,other.cur_entry_owner().frame.unwrap().slot_perm
== self.cur_entry_owner().frame.unwrap().slot_perm,other.cur_entry_owner().path == self.cur_entry_owner().path,other.cur_entry_owner().parent_level == self.cur_entry_owner().parent_level,self.level == other.level,self.guard_level == other.guard_level,self.va == other.va,self.prefix == other.prefix,self.popped_too_high == other.popped_too_high,forall |i: int| {
self.level <= i < NR_LEVELS
==> #[trigger] self.continuations[i] == other.continuations[i]
},other.continuations[self.level - 1].inv(),other.continuations[self.level - 1].all_some(),ensuresother.inv(),other.relate_region(regions),After a protect operation that only modifies frame.prop of the current entry,
CursorOwner::inv() and relate_region are preserved.
Safety: protect changes only frame.prop and updates parent.children_perm to match.
EntryOwner::inv() is preserved (from protect postcondition).
relate_region is preserved because it doesn’t use frame.prop.
rel_children holds via match_pte (from protect’s wf/node_matching postconditions).
The axiom requires only the semantic properties of the modified entry that are
checked by inv and relate_region; the structural identity of other continuations
is trusted to hold from the tracked restore operations in the caller.
Sourcepub proof fn map_children_implies(
self,
f: FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>,
g: FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>,
)
pub proof fn map_children_implies( self, f: FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>, g: FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>, )
self.inv(),OwnerSubtree::implies(f, g),forall |i: int| self.level - 1 <= i < NR_LEVELS ==> self.continuations[i].map_children(f),ensuresforall |i: int| self.level - 1 <= i < NR_LEVELS ==> self.continuations[i].map_children(g),Sourcepub open spec fn nodes_locked(self, guards: Guards<'rcu, C>) -> bool
pub open spec fn nodes_locked(self, guards: Guards<'rcu, C>) -> bool
{
forall |i: int| {
self.level - 1 <= i < NR_LEVELS ==> { self.continuations[i].node_locked(guards) }
}
}Sourcepub open spec fn inc_index(self) -> Self
pub open spec fn inc_index(self) -> Self
{
Self {
continuations: self
.continuations
.insert(self.level - 1, self.continuations[self.level - 1].inc_index()),
va: AbstractVaddr {
offset: self.va.offset,
index: self
.va
.index
.insert(
self.level - 1,
self.continuations[self.level - 1].inc_index().idx as int,
),
},
popped_too_high: false,
..self
}
}Sourcepub open spec fn zero_below_level_rec(self, level: PagingLevel) -> Self
pub open spec fn zero_below_level_rec(self, level: PagingLevel) -> Self
{
if self.level <= level {
self
} else {
Self {
va: AbstractVaddr {
offset: self.va.offset,
index: self.va.index.insert(level - 1, 0),
},
..self.zero_below_level_rec((level + 1) as u8)
}
}
}Sourcepub open spec fn zero_below_level(self) -> Self
pub open spec fn zero_below_level(self) -> Self
{ self.zero_below_level_rec(1u8) }Sourcepub proof fn zero_below_level_rec_preserves_above(self, level: PagingLevel)
pub proof fn zero_below_level_rec_preserves_above(self, level: PagingLevel)
forall |lv: int| {
lv >= self.level
==> self.zero_below_level_rec(level).va.index[lv] == #[trigger] self.va.index[lv]
},Sourcepub proof fn zero_preserves_above(self)
pub proof fn zero_preserves_above(self)
forall |lv: int| {
lv >= self.level
==> self.zero_below_level().va.index[lv] == #[trigger] self.va.index[lv]
},Sourcepub proof fn do_zero_below_level(tracked &mut self)
pub proof fn do_zero_below_level(tracked &mut self)
old(self).inv(),ensures*self == old(self).zero_below_level(),Sourcepub proof fn do_inc_index(tracked &mut self)
pub proof fn do_inc_index(tracked &mut self)
old(self).inv(),old(self).continuations[old(self).level - 1].idx + 1 < NR_ENTRIES,old(self).level == NR_LEVELS
==> (old(self).continuations[old(self).level - 1].idx + 1)
< C::TOP_LEVEL_INDEX_RANGE_spec().end,ensuresself.inv(),*self == old(self).inc_index(),Sourcepub proof fn inc_and_zero_increases_va(self)
pub proof fn inc_and_zero_increases_va(self)
self.inv(),ensuresself.inc_index().zero_below_level().va.to_vaddr() > self.va.to_vaddr(),Sourcepub proof fn zero_rec_preserves_all_but_va(self, level: PagingLevel)
pub proof fn zero_rec_preserves_all_but_va(self, level: PagingLevel)
self.zero_below_level_rec(level).level == self.level,self.zero_below_level_rec(level).continuations == self.continuations,self.zero_below_level_rec(level).guard_level == self.guard_level,self.zero_below_level_rec(level).prefix == self.prefix,self.zero_below_level_rec(level).popped_too_high == self.popped_too_high,Sourcepub proof fn zero_preserves_all_but_va(self)
pub proof fn zero_preserves_all_but_va(self)
self.zero_below_level().level == self.level,self.zero_below_level().continuations == self.continuations,self.zero_below_level().guard_level == self.guard_level,self.zero_below_level().prefix == self.prefix,self.zero_below_level().popped_too_high == self.popped_too_high,Sourcepub open spec fn cur_va_range(self) -> Range<AbstractVaddr>
pub open spec fn cur_va_range(self) -> Range<AbstractVaddr>
{
let start = self.va.align_down(self.level as int);
let end = self.va.align_up(self.level as int);
Range { start, end }
}Sourcepub proof fn cur_va_range_reflects_view(self)
pub proof fn cur_va_range_reflects_view(self)
self.inv(),ensuresself.cur_va_range().start.reflect(self@.query_range().start),self.cur_va_range().end.reflect(self@.query_range().end),Sourcepub proof fn cur_va_in_subtree_range(self)
pub proof fn cur_va_in_subtree_range(self)
self.inv(),ensuresvaddr(self.cur_subtree().value.path) <= self.cur_va()
< vaddr(self.cur_subtree().value.path) + page_size(self.level as PagingLevel),The current virtual address falls within the VA range of the current subtree’s path.
Sourcepub proof fn cur_subtree_eq_filtered_mappings(self)
pub proof fn cur_subtree_eq_filtered_mappings(self)
self.inv(),ensuresPageTableOwner(self.cur_subtree())@.mappings
== self@
.mappings
.filter(|m: Mapping| (
self@.cur_va <= m.va_range.start < self@.cur_va + page_size(self.level)
)),The current subtree’s mappings equal the filter over [cur_va, cur_va + page_size(level)).
Sourcepub proof fn subtree_va_ranges_disjoint(self, j: int)
pub proof fn subtree_va_ranges_disjoint(self, j: int)
self.inv(),0 <= j < NR_ENTRIES,j != self.index(),self.continuations[self.level - 1].children[j] is Some,ensuresvaddr(self.continuations[self.level - 1].path().push_tail(j as usize))
+ page_size(self.level as PagingLevel) <= self.cur_va()
|| self.cur_va()
< vaddr(self.continuations[self.level - 1].path().push_tail(j as usize)),Subtrees at different indices have disjoint VA ranges.
Sourcepub proof fn higher_level_children_disjoint(self, i: int, j: int)
pub proof fn higher_level_children_disjoint(self, i: int, j: int)
self.inv(),self.level - 1 < i < NR_LEVELS,0 <= j < NR_ENTRIES,j != self.continuations[i].idx,self.continuations[i].children[j] is Some,ensuresvaddr(self.continuations[i].path().push_tail(j as usize))
+ page_size((i + 1) as PagingLevel) <= self.cur_va()
|| self.cur_va() < vaddr(self.continuations[i].path().push_tail(j as usize)),Children of higher-level continuations have VA ranges that don’t include cur_va, because cur_va’s indices at those levels match the path to the current position.
Sourcepub proof fn mapping_covering_cur_va_from_cur_subtree(self, m: Mapping)
pub proof fn mapping_covering_cur_va_from_cur_subtree(self, m: Mapping)
self.inv(),self.view_mappings().contains(m),m.va_range.start <= self.cur_va() < m.va_range.end,ensuresPageTableOwner(self.cur_subtree()).view_rec(self.cur_subtree().value.path).contains(m),Any mapping that covers cur_va must come from the current subtree. This follows from the disjointness of VA ranges and the fact that cur_va falls within the current subtree’s VA range.
Sourcepub proof fn inv_continuation(self, i: int)
pub proof fn inv_continuation(self, i: int)
self.inv(),self.level - 1 <= i <= NR_LEVELS - 1,ensuresself.continuations.contains_key(i),self.continuations[i].inv(),self.continuations[i].children.len() == NR_ENTRIES,Sourcepub open spec fn view_mappings(self) -> Set<Mapping>
pub open spec fn view_mappings(self) -> Set<Mapping>
{
Set::new(|m: Mapping| {
exists |i: int| {
self.level - 1 <= i < NR_LEVELS
&& self.continuations[i].view_mappings().contains(m)
}
})
}Sourcepub proof fn view_mappings_take_lowest(self, new: Self)
pub proof fn view_mappings_take_lowest(self, new: Self)
self.inv(),new.continuations == self.continuations.remove(self.level - 1),ensuresnew.view_mappings()
== self.view_mappings() - self.continuations[self.level - 1].view_mappings(),Sourcepub proof fn view_mappings_put_lowest(
self,
new: Self,
cont: CursorContinuation<'_, C>,
)
pub proof fn view_mappings_put_lowest( self, new: Self, cont: CursorContinuation<'_, C>, )
cont.inv(),new.inv(),new.continuations == self.continuations.insert(self.level - 1, cont),ensuresnew.view_mappings() == self.view_mappings() + cont.view_mappings(),Sourcepub open spec fn as_page_table_owner(self) -> PageTableOwner<C>
pub open spec fn as_page_table_owner(self) -> PageTableOwner<C>
{
if self.level == 1 {
let l1 = self.continuations[0];
let l2 = self.continuations[1].restore_spec(l1).0;
let l3 = self.continuations[2].restore_spec(l2).0;
let l4 = self.continuations[3].restore_spec(l3).0;
l4.as_page_table_owner()
} else if self.level == 2 {
let l2 = self.continuations[1];
let l3 = self.continuations[2].restore_spec(l2).0;
let l4 = self.continuations[3].restore_spec(l3).0;
l4.as_page_table_owner()
} else if self.level == 3 {
let l3 = self.continuations[2];
let l4 = self.continuations[3].restore_spec(l3).0;
l4.as_page_table_owner()
} else {
let l4 = self.continuations[3];
l4.as_page_table_owner()
}
}Sourcepub proof fn as_page_table_owner_preserves_view_mappings(self)
pub proof fn as_page_table_owner_preserves_view_mappings(self)
self.inv(),ensuresself.as_page_table_owner().view_rec(self.continuations[3].path()) == self.view_mappings(),Sourcepub open spec fn cur_entry_owner(self) -> EntryOwner<C>
pub open spec fn cur_entry_owner(self) -> EntryOwner<C>
{ self.cur_subtree().value }Sourcepub open spec fn cur_subtree(self) -> OwnerSubtree<C>
pub open spec fn cur_subtree(self) -> OwnerSubtree<C>
{ self.continuations[self.level - 1].children[self.index() as int].unwrap() }Sourcepub proof fn borrow_cur_frame_slot_perm(tracked &self) -> tracked res : &PointsTo<MetaSlot>
pub proof fn borrow_cur_frame_slot_perm(tracked &self) -> tracked res : &PointsTo<MetaSlot>
self.cur_entry_owner().is_frame(),ensures*res == self.cur_entry_owner().frame.unwrap().slot_perm,Borrows the slot permission from the current frame entry owner.
This is an axiom because expressing the structural borrow through the
nested Map/Seq/Option layers is not yet supported directly in Verus.
The axiom is safe: it only provides a shared reference to data already
logically owned by self, and the borrow cannot outlive self.
Sourcepub proof fn cur_frame_clone_requires(
self,
item: C::Item,
pa: Paddr,
level: PagingLevel,
prop: PageProperty,
regions: MetaRegionOwners,
)
pub proof fn cur_frame_clone_requires( self, item: C::Item, pa: Paddr, level: PagingLevel, prop: PageProperty, regions: MetaRegionOwners, )
self.inv(),self.relate_region(regions),self.cur_entry_owner().is_frame(),pa == self.cur_entry_owner().frame.unwrap().mapped_pa,C::item_from_raw_spec(pa, level, prop) == item,ensuresitem
.clone_requires(
self.cur_entry_owner().frame.unwrap().slot_perm,
regions.slot_owners[frame_to_index(pa)].inner_perms.ref_count,
),Axiom: the item reconstructed from the current frame’s physical address satisfies clone_requires.
Safety: When relate_region holds for a frame entry, the item reconstructed via
item_from_raw_spec(pa, ...) is the original frame item. The frame’s slot permission
(owned by the cursor) has the correct address, is initialised, and its ref count is in the
valid clonable range (> 0, < REF_COUNT_MAX), so clone_requires is satisfied.
Sourcepub proof fn clone_item_preserves_invariants(
self,
old_regions: MetaRegionOwners,
new_regions: MetaRegionOwners,
idx: usize,
)
pub proof fn clone_item_preserves_invariants( self, old_regions: MetaRegionOwners, new_regions: MetaRegionOwners, idx: usize, )
self.inv(),self.relate_region(old_regions),old_regions.inv(),self.cur_entry_owner().is_frame(),idx == frame_to_index(self.cur_entry_owner().frame.unwrap().mapped_pa),old_regions.slot_owners.contains_key(idx),new_regions.slot_owners.contains_key(idx),new_regions.slot_owners[idx].inner_perms.ref_count.value()
== old_regions.slot_owners[idx].inner_perms.ref_count.value() + 1,new_regions.slot_owners[idx].inner_perms.ref_count.id()
== old_regions.slot_owners[idx].inner_perms.ref_count.id(),new_regions.slot_owners[idx].inner_perms.storage
== old_regions.slot_owners[idx].inner_perms.storage,new_regions.slot_owners[idx].inner_perms.vtable_ptr
== old_regions.slot_owners[idx].inner_perms.vtable_ptr,new_regions.slot_owners[idx].inner_perms.in_list
== old_regions.slot_owners[idx].inner_perms.in_list,new_regions.slot_owners[idx].path_if_in_pt == old_regions.slot_owners[idx].path_if_in_pt,new_regions.slot_owners[idx].self_addr == old_regions.slot_owners[idx].self_addr,new_regions.slot_owners[idx].raw_count == old_regions.slot_owners[idx].raw_count,new_regions.slot_owners[idx].usage == old_regions.slot_owners[idx].usage,new_regions.slot_owners.dom() == old_regions.slot_owners.dom(),forall |i: usize| {
i != idx && old_regions.slot_owners.contains_key(i)
==> new_regions.slot_owners[i] == old_regions.slot_owners[i]
},new_regions.slots == old_regions.slots,0 < old_regions.slot_owners[idx].inner_perms.ref_count.value(),old_regions.slot_owners[idx].inner_perms.ref_count.value() + 1 < REF_COUNT_MAX,ensuresnew_regions.inv(),self.relate_region(new_regions),Incrementing the ref count of the current frame preserves regions.inv() and
self.relate_region(new_regions).
Sourcepub proof fn cur_entry_node_implies_level_gt_1(self)
pub proof fn cur_entry_node_implies_level_gt_1(self)
self.inv(),self.cur_entry_owner().is_node(),ensuresself.level > 1,If the current entry is a page table node, the cursor must be at level >= 2.
Proof: the current child subtree has is_node(), so by the ghost-tree la_inv
(is_node() ==> tree_level < INC_LEVELS - 1), its tree level satisfies
INC_LEVELS - self.level < INC_LEVELS - 1, i.e., self.level > 1.
Sourcepub proof fn frame_not_fits_implies_level_gt_1(
self,
cur_entry_fits_range: bool,
cur_va: Vaddr,
end: Vaddr,
)
pub proof fn frame_not_fits_implies_level_gt_1( self, cur_entry_fits_range: bool, cur_va: Vaddr, end: Vaddr, )
self.inv(),self.cur_entry_owner().is_frame(),!cur_entry_fits_range,cur_va < end,cur_entry_fits_range
== (cur_va == self.cur_va_range().start.to_vaddr()
&& self.cur_va_range().end.to_vaddr() <= end),cur_va as nat % PAGE_SIZE as nat == 0,end as nat % PAGE_SIZE as nat == 0,ensuresself.level > 1,A frame entry at the cursor’s current level that doesn’t fit the aligned range
[cur_va, end) must be at level > 1.
Justification: At level 1, page_size(1) == BASE_PAGE_SIZE. Since the cursor VA
and end are BASE_PAGE_SIZE-aligned and cur_va < end, we have
cur_va + page_size(1) <= end, so a level-1 frame always fits. Therefore
!cur_entry_fits_range implies level > 1.
Sourcepub proof fn split_while_huge_node_noop(self)
pub proof fn split_while_huge_node_noop(self)
self.inv(),self.cur_entry_owner().is_node(),self.level > 1,ensuresself@.split_while_huge(page_size((self.level - 1) as PagingLevel)) == self@,Sourcepub proof fn split_while_huge_absent_noop(self, size: usize)
pub proof fn split_while_huge_absent_noop(self, size: usize)
self.inv(),self.cur_entry_owner().is_absent(),ensuresself@.split_while_huge(size) == self@,When the current entry is absent, there is no mapping at cur_va in the abstract view,
so split_while_huge finds nothing to split and is a no-op for any target size.
Sourcepub proof fn split_while_huge_at_level_noop(self)
pub proof fn split_while_huge_at_level_noop(self)
self.inv(),ensuresself@.split_while_huge(page_size(self.level as PagingLevel)) == self@,Sourcepub proof fn new_child_mappings_eq_target(
self,
new_subtree: OwnerSubtree<C>,
pa: Paddr,
level: PagingLevel,
prop: PageProperty,
)
pub proof fn new_child_mappings_eq_target( self, new_subtree: OwnerSubtree<C>, pa: Paddr, level: PagingLevel, prop: PageProperty, )
self.inv(),level == self.level,new_subtree.inv(),new_subtree.value.is_frame(),new_subtree.value.path
== self
.continuations[self.level as int - 1]
.path()
.push_tail(self.continuations[self.level as int - 1].idx as usize),new_subtree.value.frame.unwrap().mapped_pa == pa,new_subtree.value.frame.unwrap().prop == prop,ensuresPageTableOwner(new_subtree)@.mappings
== set![
Mapping { va_range : self @.cur_slot_range(page_size(level)), pa_range : pa.. (pa
+ page_size(level)) as usize, page_size : page_size(level), property : prop, }
],A new frame subtree at the current position has mappings equal to the singleton mapping covering the current slot range.
Sourcepub proof fn map_branch_none_inv_holds(self, owner0: Self)
pub proof fn map_branch_none_inv_holds(self, owner0: Self)
owner0.inv(),self.level == owner0.level,self.va == owner0.va,self.guard_level == owner0.guard_level,self.prefix == owner0.prefix,self.popped_too_high == owner0.popped_too_high,forall |i: int| {
self.level <= i < NR_LEVELS
==> #[trigger] self.continuations[i] == owner0.continuations[i]
},self.continuations[self.level - 1].inv(),self.continuations[self.level - 1].all_some(),self.continuations[self.level - 1].idx == owner0.continuations[owner0.level - 1].idx,self.continuations[self.level - 1].entry_own.parent_level
== owner0.continuations[owner0.level - 1].entry_own.parent_level,self.continuations[self.level - 1].guard_perm.value().inner.inner@.ptr.addr()
== owner0.continuations[owner0.level - 1].guard_perm.value().inner.inner@.ptr.addr(),self.continuations[self.level - 1].path()
== owner0.continuations[owner0.level - 1].path(),self.va.index[self.level - 1] == self.continuations[self.level - 1].idx,self.continuations.dom() =~= owner0.continuations.dom(),ensuresself.inv(),After alloc_if_none (absent→node) + restore, the cursor invariant holds.
Sourcepub proof fn map_branch_none_path_tracked_holds(
self,
owner0: Self,
regions: MetaRegionOwners,
old_regions: MetaRegionOwners,
)
pub proof fn map_branch_none_path_tracked_holds( self, owner0: Self, regions: MetaRegionOwners, old_regions: MetaRegionOwners, )
owner0.relate_region(old_regions),self.inv(),self.level == owner0.level,forall |i: int| {
self.level <= i < NR_LEVELS
==> #[trigger] self.continuations[i] == owner0.continuations[i]
},Entry::<C>::path_tracked_pred_preserved(old_regions, regions),self
.continuations[self.level - 1]
.map_children(PageTableOwner::<C>::path_tracked_pred(regions)),ensuresself.map_full_tree(PageTableOwner::<C>::path_tracked_pred(regions)),After alloc_if_none (absent→node), path_tracked_pred transfers via map_children_lift.
Sourcepub proof fn map_branch_none_no_new_mappings(self, owner0: Self)
pub proof fn map_branch_none_no_new_mappings(self, owner0: Self)
owner0.inv(),self.inv(),self.level == owner0.level,self.va == owner0.va,forall |i: int| {
self.level <= i < NR_LEVELS
==> #[trigger] self.continuations[i] == owner0.continuations[i]
},owner0
.continuations[owner0.level - 1]
.children[owner0.continuations[owner0.level - 1].idx as int] is Some,owner0
.continuations[owner0.level - 1]
.children[owner0.continuations[owner0.level - 1].idx as int]
.unwrap()
.value
.is_absent(),self
.continuations[self.level - 1]
.children[self.continuations[self.level - 1].idx as int] is Some,self
.continuations[self.level - 1]
.children[self.continuations[self.level - 1].idx as int]
.unwrap()
.value
.is_node(),self.continuations[self.level - 1].path()
== owner0.continuations[owner0.level - 1].path(),forall |j: int| {
0 <= j < NR_ENTRIES && j != owner0.continuations[owner0.level - 1].idx as int
==> #[trigger] self.continuations[self.level - 1].children[j]
== owner0.continuations[owner0.level - 1].children[j]
},PageTableOwner(
self
.continuations[self.level - 1]
.children[self.continuations[self.level - 1].idx as int]
.unwrap(),
)
.view_rec(
self
.continuations[self.level - 1]
.path()
.push_tail(self.continuations[self.level - 1].idx as usize),
) =~= Set::<Mapping>::empty(),ensuresself.view_mappings() =~= owner0.view_mappings(),After alloc_if_none (absent→node), view_mappings is unchanged (both contribute zero mappings).
Sourcepub proof fn map_branch_none_cur_entry_absent(self)
pub proof fn map_branch_none_cur_entry_absent(self)
self.inv(),forall |i: int| {
0 <= i < NR_ENTRIES
==> #[trigger] self.continuations[self.level - 1].children[i] is Some
&& self.continuations[self.level - 1].children[i].unwrap().value.is_absent()
},ensuresself.cur_entry_owner().is_absent(),After map_branch_none (alloc_if_none + push_level), the current entry is absent.
Proof: alloc_if_none creates an empty PT node where all children are absent
(allocated_empty_node_owner line 172). push_level enters one of these children,
so cur_entry_owner().is_absent() holds.
Sourcepub proof fn map_branch_frame_split_while_huge(
self,
owner0: Self,
owner_before_frame: Self,
level_before_frame: int,
)
pub proof fn map_branch_frame_split_while_huge( self, owner0: Self, owner_before_frame: Self, level_before_frame: int, )
self.inv(),owner0.inv(),owner_before_frame.inv(),1 <= level_before_frame - 1,level_before_frame <= NR_LEVELS,self.level == (level_before_frame - 1) as u8,owner_before_frame@
== owner0@.split_while_huge(page_size(level_before_frame as PagingLevel)),self@
== owner_before_frame@
.split_if_mapped_huge_spec(page_size((level_before_frame - 1) as PagingLevel)),ensures self@ == owner0@.split_while_huge(page_size(self.level as PagingLevel)),After map_branch_frame splits a huge frame at level level_before_frame and descends,
the cursor view equals owner0@.split_while_huge(page_size(level_before_frame - 1)).
Chain: owner@ = owner_before_frame@.split_if_mapped_huge_spec(page_size(level_before_frame - 1)) = owner0@.split_while_huge(page_size(level_before_frame)).split_if_mapped_huge_spec(…) = owner0@.split_while_huge(page_size(level_before_frame - 1)) The last equality uses the fact that split_while_huge(L) on a frame of size page_size(L) takes exactly one split step to page_size(L-1), matching split_if_mapped_huge_spec.
Sourcepub proof fn find_next_split_push_equals_split_while_huge(
self,
old_view: CursorView<C>,
)
pub proof fn find_next_split_push_equals_split_while_huge( self, old_view: CursorView<C>, )
self.inv(),self.cur_entry_owner().is_frame(),self@.cur_va == old_view.cur_va,old_view.present(),self@.mappings
=~= old_view.split_if_mapped_huge_spec(page_size(self.level as PagingLevel)).mappings,ensuresself@.mappings
=~= old_view.split_while_huge(page_size(self.level as PagingLevel)).mappings,After split_if_mapped_huge + push_level, the mappings equal
old_view.split_while_huge(page_size(current_level)).
Sourcepub proof fn split_while_huge_cur_va_independent(
v1: CursorView<C>,
v2: CursorView<C>,
size: usize,
)
pub proof fn split_while_huge_cur_va_independent( v1: CursorView<C>, v2: CursorView<C>, size: usize, )
v1.inv(),v2.inv(),v1.mappings =~= v2.mappings,v1.cur_va <= v2.cur_va,v1
.mappings
.filter(|m: Mapping| v1.cur_va <= m.va_range.start && m.va_range.start < v2.cur_va)
=~= Set::<Mapping>::empty(),!v1.present() && v2.present() ==> v2.query_mapping().page_size <= size,ensuresv1.split_while_huge(size).mappings =~= v2.split_while_huge(size).mappings,split_while_huge gives the same mappings for two cur_va values
when no mapping starts between them and the !present case is a no-op.
Sourcepub open spec fn locked_range(self) -> Range<Vaddr>
pub open spec fn locked_range(self) -> Range<Vaddr>
{
let start = self.prefix.align_down(self.guard_level as int).to_vaddr();
let end = self.prefix.align_up(self.guard_level as int).to_vaddr();
Range { start, end }
}Sourcepub open spec fn in_locked_range(self) -> bool
pub open spec fn in_locked_range(self) -> bool
{ self.locked_range().start <= self.va.to_vaddr() < self.locked_range().end }Sourcepub open spec fn above_locked_range(self) -> bool
pub open spec fn above_locked_range(self) -> bool
{ self.va.to_vaddr() >= self.locked_range().end }Sourcepub proof fn prefix_in_locked_range(self)
pub proof fn prefix_in_locked_range(self)
forall |i: int| i >= self.guard_level ==> self.va.index[i] == self.prefix.index[i],ensuresself.in_locked_range(),Sourcepub proof fn in_locked_range_level_lt_nr_levels(self)
pub proof fn in_locked_range_level_lt_nr_levels(self)
self.inv(),self.in_locked_range(),!self.popped_too_high,ensuresself.level < NR_LEVELS,Sourcepub proof fn in_locked_range_level_lt_guard_level(self)
pub proof fn in_locked_range_level_lt_guard_level(self)
self.inv(),self.in_locked_range(),!self.popped_too_high,ensuresself.level < self.guard_level,Sourcepub proof fn node_within_locked_range(self, level: PagingLevel)
pub proof fn node_within_locked_range(self, level: PagingLevel)
self.in_locked_range(),1 <= level < self.guard_level,self.va.inv(),ensuresself.locked_range().start
<= nat_align_down(
self.va.to_vaddr() as nat,
page_size((level + 1) as PagingLevel) as nat,
) as usize,nat_align_down(self.va.to_vaddr() as nat, page_size((level + 1) as PagingLevel) as nat)
as usize + page_size((level + 1) as PagingLevel) <= self.locked_range().end,The node at level+1 containing va fits within the locked range.
Sourcepub proof fn locked_range_page_aligned(self)
pub proof fn locked_range_page_aligned(self)
self.inv(),ensuresself.locked_range().end % PAGE_SIZE == 0,self.locked_range().start % PAGE_SIZE == 0,Sourcepub proof fn cur_subtree_inv(self)
pub proof fn cur_subtree_inv(self)
self.inv(),ensuresself.cur_subtree().inv(),Sourcepub proof fn cur_entry_absent_not_present(self)
pub proof fn cur_entry_absent_not_present(self)
self.inv(),self.cur_entry_owner().is_absent(),ensures!self@.present(),If the current entry is absent, !self@.present().
Sourcepub proof fn cur_subtree_empty_not_present(self)
pub proof fn cur_subtree_empty_not_present(self)
self.inv(),PageTableOwner(self.cur_subtree()).view_rec(self.cur_subtree().value.path) =~= set![],ensures!self@.present(),Generalises cur_entry_absent_not_present to any empty subtree.
Sourcepub proof fn cur_entry_frame_present(self)
pub proof fn cur_entry_frame_present(self)
self.inv(),self.cur_entry_owner().is_frame(),ensuresself@.present(),self@
.query(
self.cur_entry_owner().frame.unwrap().mapped_pa,
self.cur_entry_owner().frame.unwrap().size,
self.cur_entry_owner().frame.unwrap().prop,
),Sourcepub open spec fn relate_region(self, regions: MetaRegionOwners) -> bool
pub open spec fn relate_region(self, regions: MetaRegionOwners) -> bool
{
&&& self
.map_full_tree(|entry_owner: EntryOwner<C>, path: TreePath<NR_ENTRIES>| {
entry_owner.relate_region(regions)
})
&&& self.map_full_tree(PageTableOwner::<C>::path_tracked_pred(regions))
}Sourcepub open spec fn not_in_tree(self, owner: EntryOwner<C>) -> bool
pub open spec fn not_in_tree(self, owner: EntryOwner<C>) -> bool
{
self.map_full_tree(|owner0: EntryOwner<C>, path: TreePath<NR_ENTRIES>| {
owner0.meta_slot_paddr_neq(owner)
})
}Sourcepub proof fn absent_not_in_tree(self, owner: EntryOwner<C>)
pub proof fn absent_not_in_tree(self, owner: EntryOwner<C>)
self.inv(),owner.inv(),owner.is_absent(),ensuresself.not_in_tree(owner),Sourcepub proof fn not_in_tree_from_not_mapped(
self,
regions: MetaRegionOwners,
new_entry: EntryOwner<C>,
)
pub proof fn not_in_tree_from_not_mapped( self, regions: MetaRegionOwners, new_entry: EntryOwner<C>, )
self.inv(),self.relate_region(regions),new_entry.meta_slot_paddr() is Some,regions
.slot_owners[frame_to_index(new_entry.meta_slot_paddr().unwrap())]
.path_if_in_pt is None,ensuresself.not_in_tree(new_entry),If the cursor owner’s tree satisfies relate_region(regions), and a new entry’s physical
address is not currently tracked in the page table (path_if_in_pt is None), then no
existing entry in the tree has the same physical address as the new entry.
This lemma encapsulates the map_children_implies proof for not_in_tree, factored out
so it runs in its own Z3 context (avoiding rlimit issues when called from large functions).
Sourcepub proof fn relate_region_preserved(
self,
other: Self,
regions0: MetaRegionOwners,
regions1: MetaRegionOwners,
)
pub proof fn relate_region_preserved( self, other: Self, regions0: MetaRegionOwners, regions1: MetaRegionOwners, )
self.inv(),self.relate_region(regions0),self.level == other.level,self.continuations =~= other.continuations,OwnerSubtree::implies(
PageTableOwner::<C>::relate_region_pred(regions0),
PageTableOwner::<C>::relate_region_pred(regions1),
),OwnerSubtree::implies(
PageTableOwner::<C>::path_tracked_pred(regions0),
PageTableOwner::<C>::path_tracked_pred(regions1),
),ensuresother.relate_region(regions1),Sourcepub proof fn relate_region_slot_owners_preserved(
self,
regions0: MetaRegionOwners,
regions1: MetaRegionOwners,
)
pub proof fn relate_region_slot_owners_preserved( self, regions0: MetaRegionOwners, regions1: MetaRegionOwners, )
self.inv(),self.relate_region(regions0),regions0.slot_owners =~= regions1.slot_owners,ensuresself.relate_region(regions1),Transfers relate_region when slot_owners is preserved.
Sourcepub proof fn relate_region_slot_owners_rc_increment(
self,
regions0: MetaRegionOwners,
regions1: MetaRegionOwners,
idx: usize,
)
pub proof fn relate_region_slot_owners_rc_increment( self, regions0: MetaRegionOwners, regions1: MetaRegionOwners, idx: usize, )
self.inv(),self.relate_region(regions0),regions0.inv(),regions1.slots == regions0.slots,regions1.slot_owners.dom() == regions0.slot_owners.dom(),regions1.slot_owners[idx].inner_perms.ref_count.value()
== regions0.slot_owners[idx].inner_perms.ref_count.value() + 1,regions1.slot_owners[idx].inner_perms.ref_count.id()
== regions0.slot_owners[idx].inner_perms.ref_count.id(),regions1.slot_owners[idx].inner_perms.storage
== regions0.slot_owners[idx].inner_perms.storage,regions1.slot_owners[idx].inner_perms.vtable_ptr
== regions0.slot_owners[idx].inner_perms.vtable_ptr,regions1.slot_owners[idx].inner_perms.in_list
== regions0.slot_owners[idx].inner_perms.in_list,regions1.slot_owners[idx].path_if_in_pt == regions0.slot_owners[idx].path_if_in_pt,regions1.slot_owners[idx].self_addr == regions0.slot_owners[idx].self_addr,regions1.slot_owners[idx].raw_count == regions0.slot_owners[idx].raw_count,regions1.slot_owners[idx].usage == regions0.slot_owners[idx].usage,regions1.slot_owners[idx].inner_perms.ref_count.value() != REF_COUNT_UNUSED,forall |i: usize| {
i != idx && regions0.slot_owners.contains_key(i)
==> regions1.slot_owners[i] == regions0.slot_owners[i]
},ensuresself.relate_region(regions1),Sourcepub proof fn relate_region_borrow_slot(
self,
regions0: MetaRegionOwners,
regions1: MetaRegionOwners,
changed_idx: usize,
)
pub proof fn relate_region_borrow_slot( self, regions0: MetaRegionOwners, regions1: MetaRegionOwners, changed_idx: usize, )
self.inv(),self.relate_region(regions0),regions1.inv(),regions0.slot_owners[changed_idx].raw_count == 0,regions1.slot_owners[changed_idx].raw_count == 1,regions1.slot_owners[changed_idx].inner_perms
== regions0.slot_owners[changed_idx].inner_perms,regions1.slot_owners[changed_idx].self_addr
== regions0.slot_owners[changed_idx].self_addr,regions1.slot_owners[changed_idx].usage == regions0.slot_owners[changed_idx].usage,regions1.slot_owners[changed_idx].path_if_in_pt
== regions0.slot_owners[changed_idx].path_if_in_pt,forall |i: usize| i != changed_idx ==> regions0.slot_owners[i] == regions1.slot_owners[i],regions0.slot_owners.dom() =~= regions1.slot_owners.dom(),ensuresself.relate_region(regions1),Transfers relate_region when raw_count changed from 0 to 1 at one index.
Uses map_implies_and with not_in_scope_pred since tree entries have !in_scope.
Sourcepub proof fn cont_entries_relate_region(self, regions: MetaRegionOwners)
pub proof fn cont_entries_relate_region(self, regions: MetaRegionOwners)
self.inv(),self.relate_region(regions),ensuresforall |i: int| {
self.level - 1 <= i < NR_LEVELS
==> {
&&& self.continuations[i].entry_own.relate_region(regions)
&&& PageTableOwner::<
C,
>::path_tracked_pred(
regions,
)(self.continuations[i].entry_own, self.continuations[i].path())
}
},Continuation entry_owns satisfy relate_region and path_tracked_pred.
§Justification
When the cursor descends into a subtree, each continuation’s entry_own
was previously checked by tree_predicate_map in the parent’s child
subtree. After descent, map_full_tree only covers the siblings (the
taken child is None), so the path entries’ properties are no longer
covered by map_full_tree. However, regions is unchanged since
descent, so the properties still hold.
Sourcepub proof fn cursor_path_nesting(self, i: int, j: int)
pub proof fn cursor_path_nesting(self, i: int, j: int)
self.inv(),self.level - 1 <= j < i,i < NR_LEVELS,ensuresself.continuations[j].path().len() as int > self.continuations[i].path().len() as int,self.continuations[j].path().index(self.continuations[i].path().len() as int)
== self.continuations[i].idx,Cursor path nesting: for two continuations where i > j >= self.level - 1,
cont_i is an ancestor of cont_j in the page table tree.
The path from the root to cont_j passes through cont_i.idx at level cont_i,
i.e., cont_j.path()[cont_i.path().len()] == cont_i.idx.
This holds because the cursor was built by descending through cont_i.idx at each level.
Sourcepub open spec fn set_va_spec(self, new_va: AbstractVaddr) -> Self
pub open spec fn set_va_spec(self, new_va: AbstractVaddr) -> Self
{ Self { va: new_va, ..self } }Sourcepub proof fn set_va(tracked &mut self, new_va: AbstractVaddr)
pub proof fn set_va(tracked &mut self, new_va: AbstractVaddr)
forall |i: int| {
old(self).level - 1 <= i < NR_LEVELS ==> new_va.index[i] == old(self).va.index[i]
},forall |i: int| {
old(self).guard_level - 1 <= i < NR_LEVELS
==> new_va.index[i] == old(self).prefix.index[i]
},ensures*self == old(self).set_va_spec(new_va),Sourcepub open spec fn set_va_in_node_spec(self, new_va: AbstractVaddr) -> Self
pub open spec fn set_va_in_node_spec(self, new_va: AbstractVaddr) -> Self
{
let old_cont = self.continuations[self.level - 1];
Self {
va: new_va,
continuations: self
.continuations
.insert(
self.level - 1,
CursorContinuation {
idx: new_va.index[self.level - 1] as usize,
..old_cont
},
),
..self
}
}Sourcepub proof fn set_va_in_node(tracked &mut self, new_va: AbstractVaddr)
pub proof fn set_va_in_node(tracked &mut self, new_va: AbstractVaddr)
old(self).inv(),new_va.inv(),forall |i: int| {
old(self).level <= i < NR_LEVELS ==> new_va.index[i] == old(self).va.index[i]
},old(self).locked_range().start <= new_va.to_vaddr() < old(self).locked_range().end,ensures*self == old(self).set_va_in_node_spec(new_va),self.inv(),When jumping within the same page-table node, only indices at levels
>= level are guaranteed to match. The entry-within-node index (level - 1) may change, so we update continuations[level-1].idx along with va.
Sourcepub open spec fn new_spec(
owner_subtree: OwnerSubtree<C>,
idx: usize,
guard_perm: GuardPerm<'rcu, C>,
) -> Self
pub open spec fn new_spec( owner_subtree: OwnerSubtree<C>, idx: usize, guard_perm: GuardPerm<'rcu, C>, ) -> Self
{
let va = AbstractVaddr {
offset: 0,
index: Map::new(|i: int| 0 <= i < NR_LEVELS, |i: int| 0)
.insert(NR_LEVELS - 1, idx as int),
};
Self {
level: NR_LEVELS as PagingLevel,
continuations: Map::empty()
.insert(
NR_LEVELS - 1 as int,
CursorContinuation::new_spec(owner_subtree, idx, guard_perm),
),
va,
guard_level: NR_LEVELS as PagingLevel,
prefix: va,
popped_too_high: false,
}
}Sourcepub proof fn new(tracked
owner_subtree: OwnerSubtree<C>,
idx: usize,
tracked guard_perm: GuardPerm<'rcu, C>,
) -> tracked res : Self
pub proof fn new(tracked owner_subtree: OwnerSubtree<C>, idx: usize, tracked guard_perm: GuardPerm<'rcu, C>, ) -> tracked res : Self
res == Self::new_spec(owner_subtree, idx, guard_perm),Sourcepub proof fn jump_above_locked_range_va_in_node(self, va: Vaddr, node_start: Vaddr)
pub proof fn jump_above_locked_range_va_in_node(self, va: Vaddr, node_start: Vaddr)
self.inv(),self.level == self.guard_level,self.above_locked_range(),self.locked_range().start <= va < self.locked_range().end,node_start
== nat_align_down(
self.va.to_vaddr() as nat,
page_size((self.level + 1) as PagingLevel) as nat,
) as usize,ensuresnode_start <= va,va < node_start + page_size((self.level + 1) as PagingLevel),Sourcepub proof fn jump_not_in_node_level_lt_guard_minus_one(
self,
level: PagingLevel,
va: Vaddr,
node_start: Vaddr,
)
pub proof fn jump_not_in_node_level_lt_guard_minus_one( self, level: PagingLevel, va: Vaddr, node_start: Vaddr, )
self.inv(),self.locked_range().start <= va < self.locked_range().end,1 <= level,level + 1 <= self.guard_level,self.locked_range().start <= node_start,node_start + page_size((level + 1) as PagingLevel) <= self.locked_range().end,!(node_start <= va && va < node_start + page_size((level + 1) as PagingLevel)),ensureslevel + 1 < self.guard_level,Trait Implementations§
Source§impl<'rcu, C: PageTableConfig> Inv for CursorOwner<'rcu, C>
impl<'rcu, C: PageTableConfig> Inv for CursorOwner<'rcu, C>
Source§open spec fn inv(self) -> bool
open spec fn inv(self) -> bool
{
&&& self.va.inv()
&&& 1 <= self.level <= NR_LEVELS
&&& self.guard_level <= NR_LEVELS
&&& C::TOP_LEVEL_INDEX_RANGE_spec().start <= self.va.index[NR_LEVELS - 1]
&&& self.va.index[NR_LEVELS - 1] < C::TOP_LEVEL_INDEX_RANGE_spec().end
&&& self.in_locked_range() || self.above_locked_range()
&&& self.popped_too_high ==> self.level >= self.guard_level && self.in_locked_range()
&&& !self.popped_too_high
==> self.level < self.guard_level || self.above_locked_range()
&&& self.continuations[self.level - 1].all_some()
&&& forall |i: int| {
self.level <= i < NR_LEVELS
==> { (#[trigger] self.continuations[i]).all_but_index_some() }
}
&&& self.prefix.inv()
&&& forall |i: int| i < self.guard_level ==> self.prefix.index[i] == 0
&&& self.level <= self.guard_level
==> forall |i: int| {
self.guard_level <= i < NR_LEVELS
==> self.va.index[i] == self.prefix.index[i]
}
&&& self.level <= 4
==> {
&&& self.continuations.contains_key(3)
&&& self.continuations[3].inv()
&&& self.continuations[3].level() == 4
&&& self.continuations[3].entry_own.parent_level == 5
&&& self.va.index[3] == self.continuations[3].idx
}
&&& self.level <= 3
==> {
&&& self.continuations.contains_key(2)
&&& self.continuations[2].inv()
&&& self.continuations[2].level() == 3
&&& self.continuations[2].entry_own.parent_level == 4
&&& self.va.index[2] == self.continuations[2].idx
&&& self.continuations[2].guard_perm.value().inner.inner@.ptr.addr()
!= self.continuations[3].guard_perm.value().inner.inner@.ptr.addr()
&&& self.continuations[2].path()
== self
.continuations[3]
.path()
.push_tail(self.continuations[3].idx as usize)
}
&&& self.level <= 2
==> {
&&& self.continuations.contains_key(1)
&&& self.continuations[1].inv()
&&& self.continuations[1].level() == 2
&&& self.continuations[1].entry_own.parent_level == 3
&&& self.va.index[1] == self.continuations[1].idx
&&& self.continuations[1].guard_perm.value().inner.inner@.ptr.addr()
!= self.continuations[2].guard_perm.value().inner.inner@.ptr.addr()
&&& self.continuations[1].guard_perm.value().inner.inner@.ptr.addr()
!= self.continuations[3].guard_perm.value().inner.inner@.ptr.addr()
&&& self.continuations[1].path()
== self
.continuations[2]
.path()
.push_tail(self.continuations[2].idx as usize)
}
&&& self.level == 1
==> {
&&& self.continuations.contains_key(0)
&&& self.continuations[0].inv()
&&& self.continuations[0].level() == 1
&&& self.continuations[0].entry_own.parent_level == 2
&&& self.va.index[0] == self.continuations[0].idx
&&& self.continuations[0].guard_perm.value().inner.inner@.ptr.addr()
!= self.continuations[1].guard_perm.value().inner.inner@.ptr.addr()
&&& self.continuations[0].guard_perm.value().inner.inner@.ptr.addr()
!= self.continuations[2].guard_perm.value().inner.inner@.ptr.addr()
&&& self.continuations[0].guard_perm.value().inner.inner@.ptr.addr()
!= self.continuations[3].guard_perm.value().inner.inner@.ptr.addr()
&&& self.continuations[0].path()
== self
.continuations[1]
.path()
.push_tail(self.continuations[1].idx as usize)
}
}