// Ghost ownership state for a page-table cursor walk (Verus verification code).
// NOTE(review): this file is a rustdoc-text extraction; layout markers such as
// "Fields§" fused onto code lines below are artifacts, not program text.
pub struct CursorOwner<'rcu, C: PageTableConfig> {
// Level the cursor is currently positioned at (1 = leaf level, per the
// level == 1 branch of as_page_table_owner and max_steps_subtree's base case).
pub level: PagingLevel,
// Per-level continuations, keyed by level - 1 (0-based level index); see the
// continuations[self.level - 1] accesses throughout.
pub continuations: Map<int, CursorContinuation<'rcu, C>>,
// Current virtual address, decomposed into per-level indices plus an offset.
pub va: AbstractVaddr,
// Level at which the subtree lock is held; popping to/above it sets
// popped_too_high (see pop_level_owner_spec).
pub guard_level: PagingLevel,
// VA prefix identifying the locked subtree; inv() requires its indices below
// guard_level to be zero.
pub prefix: AbstractVaddr,
// Set by pop_level_owner_spec when the pop reached or passed guard_level.
pub popped_too_high: bool,
}Fields§
Fields:
- level: PagingLevel
- continuations: Map<int, CursorContinuation<'rcu, C>>
- va: AbstractVaddr
- guard_level: PagingLevel
- prefix: AbstractVaddr
- popped_too_high: bool

Implementations§
Source§impl<'rcu, C: PageTableConfig> CursorOwner<'rcu, C>
impl<'rcu, C: PageTableConfig> CursorOwner<'rcu, C>
// (extraction artifact: "Source" marker fused onto a duplicated signature line)
Sourcepub open spec fn max_steps_subtree(level: usize) -> usize
// Upper bound on cursor steps to walk every node of a FULL subtree rooted at
// `level`: NR_ENTRIES entries at the leaf level; otherwise each of the
// NR_ENTRIES children costs one step plus a full walk of that child's subtree.
// Used as a termination measure for traversal proofs (see max_steps_partial).
pub open spec fn max_steps_subtree(level: usize) -> usize
{
if level <= 1 {
NR_ENTRIES
} else {
// NR_ENTRIES children, each costing (subtree walk at level-1) + 1 step.
(NR_ENTRIES * (Self::max_steps_subtree((level - 1) as usize) + 1)) as usize
}
}The number of steps it will take to walk through every node of a full
page table at level level
// (extraction artifact: "Source" marker fused onto a duplicated signature line)
Sourcepub open spec fn max_steps_partial(self, level: usize) -> usize
// Steps remaining to finish the walk from `level` upward: at each level the
// not-yet-visited entries (NR_ENTRIES - cont.idx) each cost a full subtree
// walk, plus whatever remains at the levels above.
pub open spec fn max_steps_partial(self, level: usize) -> usize
{
if level == NR_LEVELS {
// Topmost level reached: nothing above remains.
0
} else {
// Continuations are keyed by level - 1 (0-based level index).
let cont = self.continuations[(level - 1) as int];
let steps = Self::max_steps_subtree(level) * (NR_ENTRIES - cont.idx);
let remaining_steps = self.max_steps_partial((level + 1) as usize);
(steps + remaining_steps) as usize
}
}The number of steps it will take to walk through the remaining entries of the page table starting at the given level.
Sourcepub proof fn max_steps_partial_inv(self, other: Self, level: usize)
pub proof fn max_steps_partial_inv(self, other: Self, level: usize)
requires
    self.inv(),
    other.inv(),
    self.level == other.level,
    self.level <= level <= NR_LEVELS,
    forall |i: int| {
        self.level - 1 <= i < NR_LEVELS
            ==> self.continuations[i].idx == other.continuations[i].idx
    },
ensures
    self.max_steps_partial(level) == other.max_steps_partial(level),
Sourcepub open spec fn push_level_owner_spec(self, guard_perm: GuardPerm<'rcu, C>) -> Self
// Descend one level: split the child subtree selected by the VA index at the
// level below out of the current continuation, record it as the new current
// continuation, and decrement the cursor level.
pub open spec fn push_level_owner_spec(self, guard_perm: GuardPerm<'rcu, C>) -> Self
{
// Continuation at the current level (0-based key level - 1).
let cont = self.continuations[self.level - 1];
// make_cont_spec splits off the child at va.index[level - 2]; guard_perm is
// the permission for the node being descended into (push_level_owner_preserves_invs
// requires guards.lock_held on its address).
let (child, cont) = cont
.make_cont_spec(self.va.index[self.level - 2] as usize, guard_perm);
let new_continuations = self.continuations.insert(self.level - 1, cont);
let new_continuations = new_continuations.insert(self.level - 2, child);
let new_level = (self.level - 1) as u8;
Self {
continuations: new_continuations,
level: new_level,
// Descending can never leave the cursor at/above the guard level.
popped_too_high: false,
..self
}
}Sourcepub proof fn push_level_owner_decreases_steps(self, guard_perm: GuardPerm<'rcu, C>)
pub proof fn push_level_owner_decreases_steps(self, guard_perm: GuardPerm<'rcu, C>)
self.inv(),self.level > 0,ensuresself.push_level_owner_spec(guard_perm).max_steps() < self.max_steps(),Sourcepub proof fn push_level_owner_preserves_va(self, guard_perm: GuardPerm<'rcu, C>)
pub proof fn push_level_owner_preserves_va(self, guard_perm: GuardPerm<'rcu, C>)
self.inv(),self.level > 1,ensuresself.push_level_owner_spec(guard_perm).va == self.va,self.push_level_owner_spec(guard_perm).continuations[self.level - 2].idx
== self.va.index[self.level - 2],Sourcepub proof fn push_level_owner_preserves_mappings(self, guard_perm: GuardPerm<'rcu, C>)
pub proof fn push_level_owner_preserves_mappings(self, guard_perm: GuardPerm<'rcu, C>)
self.inv(),self.level > 1,ensuresself.push_level_owner_spec(guard_perm)@.mappings == self@.mappings,Sourcepub proof fn push_level_owner_preserves_inv(self, guard_perm: GuardPerm<'rcu, C>)
pub proof fn push_level_owner_preserves_inv(self, guard_perm: GuardPerm<'rcu, C>)
self.inv(),self.level > 1,forall |i: int| {
self.level - 1 <= i < NR_LEVELS
==> self.continuations[i].guard_perm.addr() != guard_perm.addr()
},ensuresself.push_level_owner_spec(guard_perm).inv(),Sourcepub proof fn push_level_owner_preserves_invs(
self,
guard_perm: GuardPerm<'rcu, C>,
regions: MetaRegionOwners,
guards: Guards<'rcu, C>,
)
pub proof fn push_level_owner_preserves_invs( self, guard_perm: GuardPerm<'rcu, C>, regions: MetaRegionOwners, guards: Guards<'rcu, C>, )
self.inv(),self.level > 1,self.only_current_locked(guards),self.nodes_locked(guards),self.relate_region(regions),guards.lock_held(guard_perm.value().inner.inner@.ptr.addr()),ensuresself.push_level_owner_spec(guard_perm).inv(),self.push_level_owner_spec(guard_perm).children_not_locked(guards),self.push_level_owner_spec(guard_perm).nodes_locked(guards),self.push_level_owner_spec(guard_perm).relate_region(regions),Sourcepub proof fn push_level_owner(tracked &mut self, tracked guard_perm: Tracked<GuardPerm<'rcu, C>>)
pub proof fn push_level_owner(tracked &mut self, tracked guard_perm: Tracked<GuardPerm<'rcu, C>>)
old(self).inv(),old(self).level > 1,ensures*self == old(self).push_level_owner_spec(guard_perm@),Sourcepub open spec fn pop_level_owner_spec(self) -> (Self, GuardPerm<'rcu, C>)
// Ascend one level: fold the current (child) continuation back into its
// parent, returning the updated cursor together with the guard permission
// recovered from the child level.
pub open spec fn pop_level_owner_spec(self) -> (Self, GuardPerm<'rcu, C>)
{
// Child continuation at the current level; parent one level above.
let child = self.continuations[self.level - 1];
let cont = self.continuations[self.level as int];
// restore_spec reinserts the child subtree and yields back the guard
// permission taken when that level was pushed.
let (new_cont, guard_perm) = cont.restore_spec(child);
let new_continuations = self.continuations.insert(self.level as int, new_cont);
let new_continuations = new_continuations.remove(self.level - 1);
let new_level = (self.level + 1) as u8;
// Record whether this pop reached (or passed) the guard level.
let popped_too_high = if new_level >= self.guard_level { true } else { false };
(
Self {
continuations: new_continuations,
level: new_level,
popped_too_high: popped_too_high,
..self
},
guard_perm,
)
}Sourcepub proof fn pop_level_owner_preserves_inv(self)
pub proof fn pop_level_owner_preserves_inv(self)
self.inv(),self.level < NR_LEVELS,self.in_locked_range(),ensuresself.pop_level_owner_spec().0.inv(),Sourcepub proof fn pop_level_owner_preserves_invs(
self,
guards: Guards<'rcu, C>,
regions: MetaRegionOwners,
)
pub proof fn pop_level_owner_preserves_invs( self, guards: Guards<'rcu, C>, regions: MetaRegionOwners, )
self.inv(),self.level < NR_LEVELS,self.in_locked_range(),self.children_not_locked(guards),self.nodes_locked(guards),self.relate_region(regions),ensuresself.pop_level_owner_spec().0.in_locked_range(),self.pop_level_owner_spec().0.inv(),self.pop_level_owner_spec().0.only_current_locked(guards),self.pop_level_owner_spec().0.nodes_locked(guards),self.pop_level_owner_spec().0.relate_region(regions),Sourcepub proof fn set_va_preserves_inv(self, new_va: AbstractVaddr)
pub proof fn set_va_preserves_inv(self, new_va: AbstractVaddr)
self.inv(),new_va.inv(),forall |i: int| self.level - 1 <= i < NR_LEVELS ==> new_va.index[i] == self.va.index[i],forall |i: int| {
self.guard_level - 1 <= i < NR_LEVELS ==> new_va.index[i] == self.prefix.index[i]
},ensuresself.set_va_spec(new_va).inv(),Update va to a new value that shares the same indices at levels >= self.level. This preserves invariants because:
- The new va satisfies va.inv()
- The indices at levels >= level match the continuation indices
- in_locked_range/above_locked_range depend on va but the preconditions ensure consistency
Sourcepub proof fn pop_level_owner(tracked &mut self) -> tracked guard_perm : GuardPerm<'rcu, C>
pub proof fn pop_level_owner(tracked &mut self) -> tracked guard_perm : GuardPerm<'rcu, C>
old(self).inv(),old(self).level < NR_LEVELS,ensures*self == old(self).pop_level_owner_spec().0,guard_perm == old(self).pop_level_owner_spec().1,Sourcepub open spec fn move_forward_owner_spec(self) -> Self
// Advance the cursor to the next entry in traversal order.
pub open spec fn move_forward_owner_spec(self) -> Self
// (extraction artifact: the recommends/requires clauses are fused onto the
// line below together with the opening brace)
self.inv(),self.level < NR_LEVELS,self.in_locked_range(),{
// If the current node has a next entry, step to it and zero the VA indices
// of all lower levels; otherwise pop a level and retry there.
if self.index() + 1 < NR_ENTRIES {
self.inc_index().zero_below_level()
} else if self.level < NR_LEVELS {
self.pop_level_owner_spec().0.move_forward_owner_spec()
} else {
// Topmost level with no next entry: only clear the flag.
Self {
popped_too_high: false,
..self
}
}
}Sourcepub proof fn move_forward_increases_va(self)
pub proof fn move_forward_increases_va(self)
self.inv(),self.level <= NR_LEVELS,self.in_locked_range(),ensuresself.move_forward_owner_spec().va.to_vaddr() > self.va.to_vaddr(),Sourcepub proof fn move_forward_not_popped_too_high(self)
pub proof fn move_forward_not_popped_too_high(self)
self.inv(),self.level <= NR_LEVELS,self.in_locked_range(),ensures!self.move_forward_owner_spec().popped_too_high,Sourcepub proof fn move_forward_owner_decreases_steps(self)
pub proof fn move_forward_owner_decreases_steps(self)
self.inv(),self.level <= NR_LEVELS,ensuresself.move_forward_owner_spec().max_steps() < self.max_steps(),Sourcepub proof fn move_forward_va_is_align_up(self)
pub proof fn move_forward_va_is_align_up(self)
self.inv(),self.level <= NR_LEVELS,self.in_locked_range(),ensuresself.move_forward_owner_spec().va == self.va.align_up(self.level as int),Sourcepub proof fn move_forward_owner_preserves_mappings(self)
pub proof fn move_forward_owner_preserves_mappings(self)
self.inv(),self.level > 1,ensuresself.move_forward_owner_spec()@.mappings == self@.mappings,Source§impl<'rcu, C: PageTableConfig> CursorOwner<'rcu, C>
impl<'rcu, C: PageTableConfig> CursorOwner<'rcu, C>
// Predicate over (entry, path) pairs: every node entry's metadata address is
// unlocked in `guards`. Used with map_* below to quantify over the tree.
Sourcepub open spec fn node_unlocked(
guards: Guards<'rcu, C>,
) -> FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>
pub open spec fn node_unlocked( guards: Guards<'rcu, C>, ) -> FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>
{
|owner: EntryOwner<C>, path: TreePath<NR_ENTRIES>| {
owner.is_node() ==> guards.unlocked(owner.node.unwrap().meta_perm.addr())
}
}Sourcepub open spec fn node_unlocked_except(
// Same as node_unlocked, but exempts the single node whose metadata address
// equals `addr` (the one node the cursor itself holds locked).
guards: Guards<'rcu, C>,
addr: usize,
) -> FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>
pub open spec fn node_unlocked_except( guards: Guards<'rcu, C>, addr: usize, ) -> FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>
{
|owner: EntryOwner<C>, path: TreePath<NR_ENTRIES>| {
owner.is_node()
==> (owner.node.unwrap().meta_perm.addr() != addr
==> guards.unlocked(owner.node.unwrap().meta_perm.addr()))
}
}Sourcepub open spec fn map_full_tree(
// Apply predicate f to the children of every continuation from the current
// level up to the root.
// NOTE(review): map_full_tree and map_only_children below currently have
// identical definitions; their names suggest different intended scopes
// (full subtrees vs direct children) — confirm whether one is misdefined.
self,
f: FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>,
) -> bool
pub open spec fn map_full_tree( self, f: FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>, ) -> bool
{
forall |i: int| {
self.level - 1 <= i < NR_LEVELS ==> { self.continuations[i].map_children(f) }
}
}Sourcepub open spec fn map_only_children(
self,
f: FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>,
) -> bool
pub open spec fn map_only_children( self, f: FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>, ) -> bool
{
forall |i: int| {
self.level - 1 <= i < NR_LEVELS ==> self.continuations[i].map_children(f)
}
}Sourcepub open spec fn children_not_locked(self, guards: Guards<'rcu, C>) -> bool
// No child node anywhere under the cursor's continuations is locked.
pub open spec fn children_not_locked(self, guards: Guards<'rcu, C>) -> bool
{ self.map_only_children(Self::node_unlocked(guards)) }Sourcepub open spec fn only_current_locked(self, guards: Guards<'rcu, C>) -> bool
// Like children_not_locked, but the entry the cursor currently points at is
// allowed (expected) to be locked.
pub open spec fn only_current_locked(self, guards: Guards<'rcu, C>) -> bool
{
self.map_only_children(
Self::node_unlocked_except(
guards,
self.cur_entry_owner().node.unwrap().meta_perm.addr(),
),
)
}Sourcepub proof fn never_drop_restores_children_not_locked(
self,
guard: PageTableGuard<'rcu, C>,
guards0: Guards<'rcu, C>,
guards1: Guards<'rcu, C>,
)
pub proof fn never_drop_restores_children_not_locked( self, guard: PageTableGuard<'rcu, C>, guards0: Guards<'rcu, C>, guards1: Guards<'rcu, C>, )
self.inv(),self.only_current_locked(guards0),<PageTableGuard<'rcu, C> as TrackDrop>::constructor_requires(guard, guards0),<PageTableGuard<'rcu, C> as TrackDrop>::constructor_ensures(guard, guards0, guards1),ensuresself.children_not_locked(guards1),Sourcepub proof fn map_children_implies(
self,
f: FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>,
g: FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>,
)
pub proof fn map_children_implies( self, f: FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>, g: FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>, )
self.inv(),OwnerSubtree::implies(f, g),forall |i: int| self.level - 1 <= i < NR_LEVELS ==> self.continuations[i].map_children(f),ensuresforall |i: int| self.level - 1 <= i < NR_LEVELS ==> self.continuations[i].map_children(g),Sourcepub open spec fn nodes_locked(self, guards: Guards<'rcu, C>) -> bool
// Every continuation node on the cursor's path (current level to root) is
// locked in `guards`.
pub open spec fn nodes_locked(self, guards: Guards<'rcu, C>) -> bool
{
forall |i: int| {
self.level - 1 <= i < NR_LEVELS ==> { self.continuations[i].node_locked(guards) }
}
}Sourcepub open spec fn inc_index(self) -> Self
// Advance the continuation index at the current level and mirror the new
// index into va.index at the same position, clearing popped_too_high.
// Callers are expected to follow with zero_below_level (see
// move_forward_owner_spec) to reset lower-level indices.
pub open spec fn inc_index(self) -> Self
{
Self {
continuations: self
.continuations
.insert(self.level - 1, self.continuations[self.level - 1].inc_index()),
va: AbstractVaddr {
offset: self.va.offset,
// Keep va.index in sync with the continuation index, as inv() requires.
index: self
.va
.index
.insert(
self.level - 1,
self.continuations[self.level - 1].inc_index().idx as int,
),
},
popped_too_high: false,
..self
}
}Sourcepub open spec fn zero_below_level_rec(self, level: PagingLevel) -> Self
// Zero the VA indices at every level strictly below the cursor level,
// recursing upward from `level` until self.level is reached. Only va.index
// changes; continuations, prefix, etc. are untouched (see
// zero_rec_preserves_all_but_va).
pub open spec fn zero_below_level_rec(self, level: PagingLevel) -> Self
{
if self.level <= level {
// Reached the cursor level: levels at/above it are left unchanged.
self
} else {
Self {
va: AbstractVaddr {
offset: self.va.offset,
// Clear the index for this level (0-based key level - 1).
index: self.va.index.insert(level - 1, 0),
},
..self.zero_below_level_rec((level + 1) as u8)
}
}
}Sourcepub open spec fn zero_below_level(self) -> Self
// Convenience wrapper: zero all levels from 1 up to (exclusive) self.level.
pub open spec fn zero_below_level(self) -> Self
{ self.zero_below_level_rec(1u8) }Sourcepub proof fn zero_below_level_rec_preserves_above(self, level: PagingLevel)
pub proof fn zero_below_level_rec_preserves_above(self, level: PagingLevel)
forall |lv: int| {
lv >= self.level
==> self.zero_below_level_rec(level).va.index[lv] == #[trigger] self.va.index[lv]
},Sourcepub proof fn zero_preserves_above(self)
pub proof fn zero_preserves_above(self)
forall |lv: int| {
lv >= self.level
==> self.zero_below_level().va.index[lv] == #[trigger] self.va.index[lv]
},Sourcepub proof fn do_zero_below_level(tracked &mut self)
pub proof fn do_zero_below_level(tracked &mut self)
old(self).inv(),ensures*self == old(self).zero_below_level(),Sourcepub proof fn do_inc_index(tracked &mut self)
pub proof fn do_inc_index(tracked &mut self)
old(self).inv(),old(self).continuations[old(self).level - 1].idx + 1 < NR_ENTRIES,old(self).in_locked_range(),ensuresself.inv(),*self == old(self).inc_index(),Sourcepub proof fn inc_and_zero_increases_va(self)
pub proof fn inc_and_zero_increases_va(self)
self.inv(),ensuresself.inc_index().zero_below_level().va.to_vaddr() > self.va.to_vaddr(),Sourcepub proof fn zero_rec_preserves_all_but_va(self, level: PagingLevel)
pub proof fn zero_rec_preserves_all_but_va(self, level: PagingLevel)
self.zero_below_level_rec(level).level == self.level,self.zero_below_level_rec(level).continuations == self.continuations,self.zero_below_level_rec(level).guard_level == self.guard_level,self.zero_below_level_rec(level).prefix == self.prefix,self.zero_below_level_rec(level).popped_too_high == self.popped_too_high,Sourcepub proof fn zero_preserves_all_but_va(self)
pub proof fn zero_preserves_all_but_va(self)
self.zero_below_level().level == self.level,self.zero_below_level().continuations == self.continuations,self.zero_below_level().guard_level == self.guard_level,self.zero_below_level().prefix == self.prefix,self.zero_below_level().popped_too_high == self.popped_too_high,Sourcepub open spec fn cur_va_range(self) -> Range<AbstractVaddr>
// The abstract VA range covered by the entry the cursor currently points at:
// va rounded down/up to the current level's alignment.
pub open spec fn cur_va_range(self) -> Range<AbstractVaddr>
{
let start = self.va.align_down(self.level as int);
let end = self.va.align_up(self.level as int);
Range { start, end }
}Sourcepub proof fn cur_va_range_reflects_view(self)
pub proof fn cur_va_range_reflects_view(self)
self.inv(),ensuresself.cur_va_range().start.reflect(self@.query_range().start),self.cur_va_range().end.reflect(self@.query_range().end),Sourcepub proof fn cur_va_in_subtree_range(self)
pub proof fn cur_va_in_subtree_range(self)
self.inv(),ensuresvaddr(self.cur_subtree().value.path) <= self.cur_va()
< vaddr(self.cur_subtree().value.path) + page_size((self.level - 1) as PagingLevel),The current virtual address falls within the VA range of the current subtree’s path. This follows from the invariant that va.index matches the continuation indices.
Sourcepub proof fn cur_subtree_eq_filtered_mappings(self)
pub proof fn cur_subtree_eq_filtered_mappings(self)
self.inv(),ensuresPageTableOwner(self.cur_subtree())@.mappings
== self@
.mappings
.filter(|m: Mapping| (
self@.cur_va <= m.va_range.start < self@.cur_va + page_size(self.level)
)),The mappings in the current subtree are exactly those mappings whose VA range starts within [cur_va, cur_va + page_size(level)). This connects the page table representation to the cursor view’s remove_subtree operation. At cursor level L, the current entry manages a VA region of size page_size(L).
Sourcepub proof fn subtree_va_ranges_disjoint(self, j: int)
pub proof fn subtree_va_ranges_disjoint(self, j: int)
self.inv(),0 <= j < NR_ENTRIES,j != self.index(),self.continuations[self.level - 1].children[j] is Some,ensuresvaddr(self.continuations[self.level - 1].path().push_tail(j as usize))
+ page_size((self.level - 1) as PagingLevel) <= self.cur_va()
|| self.cur_va()
< vaddr(self.continuations[self.level - 1].path().push_tail(j as usize)),Subtrees at different indices have disjoint VA ranges.
Sourcepub proof fn higher_level_children_disjoint(self, i: int, j: int)
pub proof fn higher_level_children_disjoint(self, i: int, j: int)
self.inv(),self.level - 1 < i < NR_LEVELS - 1,0 <= j < NR_ENTRIES,j != self.continuations[i].idx,self.continuations[i].children[j] is Some,ensuresvaddr(self.continuations[i].path().push_tail(j as usize)) + page_size(i as PagingLevel)
<= self.cur_va()
|| self.cur_va() < vaddr(self.continuations[i].path().push_tail(j as usize)),Children of higher-level continuations have VA ranges that don’t include cur_va, because cur_va’s indices at those levels match the path to the current position.
Sourcepub proof fn mapping_covering_cur_va_from_cur_subtree(self, m: Mapping)
pub proof fn mapping_covering_cur_va_from_cur_subtree(self, m: Mapping)
self.inv(),self.view_mappings().contains(m),m.va_range.start <= self.cur_va() < m.va_range.end,ensuresPageTableOwner(self.cur_subtree()).view_rec(self.cur_subtree().value.path).contains(m),Any mapping that covers cur_va must come from the current subtree. This follows from the disjointness of VA ranges and the fact that cur_va falls within the current subtree’s VA range.
Sourcepub proof fn inv_continuation(self, i: int)
pub proof fn inv_continuation(self, i: int)
self.inv(),self.level - 1 <= i <= NR_LEVELS - 1,ensuresself.continuations.contains_key(i),self.continuations[i].inv(),self.continuations[i].children.len() == NR_ENTRIES,Sourcepub open spec fn view_mappings(self) -> Set<Mapping>
// Union of the mappings visible through the cursor's continuations.
// NOTE(review): the bound `i < NR_LEVELS - 1` excludes the top continuation
// (index NR_LEVELS - 1), whereas every other quantifier in this impl uses
// `i < NR_LEVELS` — confirm the exclusion of the root-level continuation is
// intentional and not an off-by-one.
pub open spec fn view_mappings(self) -> Set<Mapping>
{
Set::new(|m: Mapping| {
exists |i: int| {
self.level - 1 <= i < NR_LEVELS - 1
&& self.continuations[i].view_mappings().contains(m)
}
})
}Sourcepub proof fn view_mappings_take_lowest(self, new: Self)
pub proof fn view_mappings_take_lowest(self, new: Self)
self.inv(),new.continuations == self.continuations.remove(self.level - 1),ensuresnew.view_mappings()
== self.view_mappings() - self.continuations[self.level - 1].view_mappings(),Sourcepub proof fn view_mappings_put_lowest(
self,
new: Self,
cont: CursorContinuation<'_, C>,
)
pub proof fn view_mappings_put_lowest( self, new: Self, cont: CursorContinuation<'_, C>, )
cont.inv(),new.inv(),new.continuations == self.continuations.insert(self.level - 1, cont),ensuresnew.view_mappings() == self.view_mappings() + cont.view_mappings(),Sourcepub open spec fn as_page_table_owner(self) -> PageTableOwner<C>
// Reassemble the full page-table ownership tree by restoring each lower
// continuation into its parent, bottom-up, then converting the root (index 3,
// i.e. level 4) continuation. Indices 0..3 are hard-coded for a 4-level
// table — consistent with inv()'s continuations[3].level() == 4 clause, but
// this assumes NR_LEVELS == 4 (TODO confirm).
pub open spec fn as_page_table_owner(self) -> PageTableOwner<C>
{
if self.level == 1 {
let l1 = self.continuations[0];
let l2 = self.continuations[1].restore_spec(l1).0;
let l3 = self.continuations[2].restore_spec(l2).0;
let l4 = self.continuations[3].restore_spec(l3).0;
l4.as_page_table_owner()
} else if self.level == 2 {
let l2 = self.continuations[1];
let l3 = self.continuations[2].restore_spec(l2).0;
let l4 = self.continuations[3].restore_spec(l3).0;
l4.as_page_table_owner()
} else if self.level == 3 {
let l3 = self.continuations[2];
let l4 = self.continuations[3].restore_spec(l3).0;
l4.as_page_table_owner()
} else {
// level == 4: the root continuation already holds the whole tree.
let l4 = self.continuations[3];
l4.as_page_table_owner()
}
}Sourcepub proof fn as_page_table_owner_preserves_view_mappings(self)
pub proof fn as_page_table_owner_preserves_view_mappings(self)
self.inv(),ensuresself.as_page_table_owner().view_rec(self.continuations[3].path()) == self.view_mappings(),Sourcepub open spec fn cur_entry_owner(self) -> EntryOwner<C>
// Ownership of the entry the cursor currently points at.
pub open spec fn cur_entry_owner(self) -> EntryOwner<C>
{ self.cur_subtree().value }Sourcepub open spec fn cur_subtree(self) -> OwnerSubtree<C>
// Subtree rooted at the current entry: child slot `index()` of the current
// continuation. The unwrap is justified by inv()'s all_some() clause for the
// continuation at self.level - 1.
pub open spec fn cur_subtree(self) -> OwnerSubtree<C>
{ self.continuations[self.level - 1].children[self.index() as int].unwrap() }Sourcepub open spec fn locked_range(self) -> Range<Vaddr>
// Concrete VA range of the locked subtree: prefix aligned down/up at the
// guard level.
pub open spec fn locked_range(self) -> Range<Vaddr>
{
let start = self.prefix.align_down(self.guard_level as int).to_vaddr();
let end = self.prefix.align_up(self.guard_level as int).to_vaddr();
Range { start, end }
}Sourcepub open spec fn in_locked_range(self) -> bool
// Whether the cursor's VA still lies inside the locked range.
pub open spec fn in_locked_range(self) -> bool
{ self.locked_range().start <= self.va.to_vaddr() < self.locked_range().end }Sourcepub open spec fn above_locked_range(self) -> bool
// Whether the cursor's VA has moved past the end of the locked range.
pub open spec fn above_locked_range(self) -> bool
{ self.va.to_vaddr() >= self.locked_range().end }Sourcepub proof fn prefix_in_locked_range(self)
pub proof fn prefix_in_locked_range(self)
forall |i: int| i >= self.guard_level ==> self.va.index[i] == self.prefix.index[i],ensuresself.in_locked_range(),Sourcepub proof fn in_locked_range_level_lt_nr_levels(self)
pub proof fn in_locked_range_level_lt_nr_levels(self)
self.inv(),self.in_locked_range(),!self.popped_too_high,ensuresself.level < NR_LEVELS,When in_locked_range and !popped_too_high, level < guard_level (from inv), hence level < NR_LEVELS.
Sourcepub proof fn node_within_locked_range(self, level: PagingLevel)
pub proof fn node_within_locked_range(self, level: PagingLevel)
self.in_locked_range(),1 <= level < self.guard_level,self.va.inv(),ensuresself.locked_range().start
<= nat_align_down(
self.va.to_vaddr() as nat,
page_size((level + 1) as PagingLevel) as nat,
) as usize,nat_align_down(self.va.to_vaddr() as nat, page_size((level + 1) as PagingLevel) as nat)
as usize + page_size((level + 1) as PagingLevel) <= self.locked_range().end,If in_locked_range() and level < guard_level, then:
- va.align_down(page_size(level+1)) >= locked_range().start
- va.align_down(page_size(level+1)) + page_size(level+1) <= locked_range().end
This follows from the fact that locked_range().start is aligned to page_size(guard_level), and page_size(guard_level) >= page_size(level+1) when guard_level > level. Therefore locked_range().start is also aligned to page_size(level+1).
Sourcepub proof fn cur_entry_absent_not_present(self)
pub proof fn cur_entry_absent_not_present(self)
self.inv(),self.cur_entry_owner().is_absent(),ensures!self@.present(),Proves that if the current entry is absent, then there is no mapping at the current virtual address. This follows from the page table structure:
- cur_va falls within the VA range of cur_subtree()
- An absent entry contributes no mappings (view_rec returns empty set)
- Mappings from other subtrees have disjoint VA ranges
Sourcepub proof fn cur_entry_frame_present(self)
pub proof fn cur_entry_frame_present(self)
self.inv(),self.cur_entry_owner().is_frame(),ensuresself@.present(),self@
.query(
self.cur_entry_owner().frame.unwrap().mapped_pa,
self.cur_entry_owner().frame.unwrap().size,
self.cur_entry_owner().frame.unwrap().prop,
),Sourcepub open spec fn relate_region(self, regions: MetaRegionOwners) -> bool
// Every entry in the cursor's tree is consistent with the metadata region
// owners: each entry relates to `regions`, and each path is tracked there.
pub open spec fn relate_region(self, regions: MetaRegionOwners) -> bool
{
&&& self
.map_full_tree(|entry_owner: EntryOwner<C>, path: TreePath<NR_ENTRIES>| {
entry_owner.relate_region(regions)
})
&&& self.map_full_tree(PageTableOwner::<C>::path_tracked_pred(regions))
}Sourcepub open spec fn not_in_tree(self, owner: EntryOwner<C>) -> bool
// `owner` aliases no entry in the cursor's tree: every entry's meta-slot
// physical address differs from owner's.
pub open spec fn not_in_tree(self, owner: EntryOwner<C>) -> bool
{
self.map_full_tree(|owner0: EntryOwner<C>, path: TreePath<NR_ENTRIES>| {
owner0.meta_slot_paddr_neq(owner)
})
}Sourcepub proof fn absent_not_in_tree(self, owner: EntryOwner<C>)
pub proof fn absent_not_in_tree(self, owner: EntryOwner<C>)
self.inv(),owner.is_absent(),ensuresself.not_in_tree(owner),Sourcepub proof fn relate_region_preserved(
self,
other: Self,
regions0: MetaRegionOwners,
regions1: MetaRegionOwners,
)
pub proof fn relate_region_preserved( self, other: Self, regions0: MetaRegionOwners, regions1: MetaRegionOwners, )
self.inv(),self.relate_region(regions0),self.level == other.level,self.continuations =~= other.continuations,OwnerSubtree::implies(
PageTableOwner::<C>::relate_region_pred(regions0),
PageTableOwner::<C>::relate_region_pred(regions1),
),OwnerSubtree::implies(
PageTableOwner::<C>::path_tracked_pred(regions0),
PageTableOwner::<C>::path_tracked_pred(regions1),
),ensuresother.relate_region(regions1),Sourcepub open spec fn set_va_spec(self, new_va: AbstractVaddr) -> Self
// Pure field update: replace va, leaving everything else unchanged. Invariant
// preservation is established separately (set_va_preserves_inv), which
// requires the new va to agree with the old one at levels >= self.level.
pub open spec fn set_va_spec(self, new_va: AbstractVaddr) -> Self
{ Self { va: new_va, ..self } }Sourcepub proof fn set_va(tracked &mut self, new_va: AbstractVaddr)
pub proof fn set_va(tracked &mut self, new_va: AbstractVaddr)
forall |i: int| {
old(self).level - 1 <= i < NR_LEVELS ==> new_va.index[i] == old(self).va.index[i]
},forall |i: int| {
old(self).guard_level - 1 <= i < NR_LEVELS
==> new_va.index[i] == old(self).prefix.index[i]
},ensures*self == old(self).set_va_spec(new_va),Trait Implementations§
Source§impl<'rcu, C: PageTableConfig> Inv for CursorOwner<'rcu, C>
impl<'rcu, C: PageTableConfig> Inv for CursorOwner<'rcu, C>
// (extraction artifact: "Source§" fused onto a duplicated signature line)
Source§open spec fn inv(self) -> bool
// Well-formedness invariant: ties the cursor position (level, va) to the
// continuation stack and to the locked subtree described by prefix/guard_level.
open spec fn inv(self) -> bool
{
&&& self.va.inv()
&&& 1 <= self.level <= NR_LEVELS
&&& self.guard_level <= NR_LEVELS
// popped_too_high is only set at/above the guard level while the VA is
// still inside the locked range; when clear, the cursor is either strictly
// below the guard level or already past the locked range.
&&& self.popped_too_high ==> self.level >= self.guard_level && self.in_locked_range()
&&& !self.popped_too_high
==> self.level < self.guard_level || self.above_locked_range()
// The continuation at the current level has every child slot populated.
&&& self.continuations[self.level - 1].all_some()
// Continuations above the current level are populated in every slot except
// (per the name all_but_index_some) the one the cursor descended through.
&&& forall |i: int| {
self.level <= i < NR_LEVELS
==> { (#[trigger] self.continuations[i]).all_but_index_some() }
}
&&& self.prefix.inv()
&&& forall |i: int| i < self.guard_level ==> self.prefix.index[i] == 0
// Per-level structural clauses, hard-coded for a 4-level table.
// NOTE(review): literal 4/5 here rather than NR_LEVELS — confirm
// NR_LEVELS == 4 so these stay in sync.
&&& self.level <= 4
==> {
&&& self.continuations.contains_key(3)
&&& self.continuations[3].inv()
&&& self.continuations[3].level() == 4
&&& self.continuations[3].entry_own.parent_level == 5
&&& self.va.index[3] == self.continuations[3].idx
}
&&& self.level <= 3
==> {
&&& self.continuations.contains_key(2)
&&& self.continuations[2].inv()
&&& self.continuations[2].level() == 3
&&& self.continuations[2].entry_own.parent_level == 4
&&& self.va.index[2] == self.continuations[2].idx
// Guard pointers at distinct levels must not alias.
&&& self.continuations[2].guard_perm.value().inner.inner@.ptr.addr()
!= self.continuations[3].guard_perm.value().inner.inner@.ptr.addr()
}
&&& self.level <= 2
==> {
&&& self.continuations.contains_key(1)
&&& self.continuations[1].inv()
&&& self.continuations[1].level() == 2
&&& self.continuations[1].entry_own.parent_level == 3
&&& self.va.index[1] == self.continuations[1].idx
&&& self.continuations[1].guard_perm.value().inner.inner@.ptr.addr()
!= self.continuations[2].guard_perm.value().inner.inner@.ptr.addr()
&&& self.continuations[1].guard_perm.value().inner.inner@.ptr.addr()
!= self.continuations[3].guard_perm.value().inner.inner@.ptr.addr()
}
&&& self.level == 1
==> {
&&& self.continuations.contains_key(0)
&&& self.continuations[0].inv()
&&& self.continuations[0].level() == 1
&&& self.continuations[0].entry_own.parent_level == 2
&&& self.va.index[0] == self.continuations[0].idx
// NOTE(review): unlike the level <= 2 clause, distinctness is asserted
// only against continuations[1], not [2] or [3] — confirm whether
// continuations[0] != [2]/[3] is implied elsewhere or missing.
&&& self.continuations[0].guard_perm.value().inner.inner@.ptr.addr()
!= self.continuations[1].guard_perm.value().inner.inner@.ptr.addr()
}
}