CursorOwner

Struct CursorOwner 

Source
pub struct CursorOwner<'rcu, C: PageTableConfig> {
    pub level: PagingLevel,
    pub continuations: Map<int, CursorContinuation<'rcu, C>>,
    pub va: AbstractVaddr,
    pub guard_level: PagingLevel,
    pub prefix: AbstractVaddr,
    pub popped_too_high: bool,
}

Fields§

- `level: PagingLevel`
- `continuations: Map<int, CursorContinuation<'rcu, C>>`
- `va: AbstractVaddr`
- `guard_level: PagingLevel`
- `prefix: AbstractVaddr`
- `popped_too_high: bool`

Implementations§

Source§

impl<'rcu, C: PageTableConfig> CursorOwner<'rcu, C>

Source

pub open spec fn max_steps_subtree(level: usize) -> usize

{
    if level <= 1 {
        NR_ENTRIES
    } else {
        (NR_ENTRIES * (Self::max_steps_subtree((level - 1) as usize) + 1)) as usize
    }
}

The number of steps it will take to walk through every node of a full page table subtree rooted at level `level`.

Source

pub open spec fn max_steps_partial(self, level: usize) -> usize

{
    if level == NR_LEVELS {
        0
    } else {
        let cont = self.continuations[(level - 1) as int];
        let steps = Self::max_steps_subtree(level) * (NR_ENTRIES - cont.idx);
        let remaining_steps = self.max_steps_partial((level + 1) as usize);
        (steps + remaining_steps) as usize
    }
}

The number of steps it will take to walk through the remaining entries of the page table, starting at the given `level`.

Source

pub open spec fn max_steps(self) -> usize

{ self.max_steps_partial(self.level as usize) }
Source

pub proof fn max_steps_partial_inv(self, other: Self, level: usize)

requires
self.inv(),
other.inv(),
self.level == other.level,
self.level <= level <= NR_LEVELS,
forall |i: int| {
    self.level - 1 <= i < NR_LEVELS
        ==> self.continuations[i].idx == other.continuations[i].idx
},
ensures
self.max_steps_partial(level) == other.max_steps_partial(level),
Source

pub open spec fn push_level_owner_spec(self, guard_perm: GuardPerm<'rcu, C>) -> Self

{
    let cont = self.continuations[self.level - 1];
    let (child, cont) = cont
        .make_cont_spec(self.va.index[self.level - 2] as usize, guard_perm);
    let new_continuations = self.continuations.insert(self.level - 1, cont);
    let new_continuations = new_continuations.insert(self.level - 2, child);
    let new_level = (self.level - 1) as u8;
    Self {
        continuations: new_continuations,
        level: new_level,
        popped_too_high: false,
        ..self
    }
}
Source

pub proof fn push_level_owner_decreases_steps(self, guard_perm: GuardPerm<'rcu, C>)

requires
self.inv(),
self.level > 0,
ensures
self.push_level_owner_spec(guard_perm).max_steps() < self.max_steps(),
Source

pub proof fn push_level_owner_preserves_va(self, guard_perm: GuardPerm<'rcu, C>)

requires
self.inv(),
self.level > 1,
ensures
self.push_level_owner_spec(guard_perm).va == self.va,
self.push_level_owner_spec(guard_perm).continuations[self.level - 2].idx
    == self.va.index[self.level - 2],
Source

pub proof fn push_level_owner_preserves_mappings(self, guard_perm: GuardPerm<'rcu, C>)

requires
self.inv(),
self.level > 1,
ensures
self.push_level_owner_spec(guard_perm)@.mappings == self@.mappings,
Source

pub proof fn push_level_owner_preserves_inv(self, guard_perm: GuardPerm<'rcu, C>)

requires
self.inv(),
self.level > 1,
forall |i: int| {
    self.level - 1 <= i < NR_LEVELS
        ==> self.continuations[i].guard_perm.addr() != guard_perm.addr()
},
ensures
self.push_level_owner_spec(guard_perm).inv(),
Source

pub proof fn push_level_owner_preserves_invs( self, guard_perm: GuardPerm<'rcu, C>, regions: MetaRegionOwners, guards: Guards<'rcu, C>, )

requires
self.inv(),
self.level > 1,
self.only_current_locked(guards),
self.nodes_locked(guards),
self.relate_region(regions),
guards.lock_held(guard_perm.value().inner.inner@.ptr.addr()),
ensures
self.push_level_owner_spec(guard_perm).inv(),
self.push_level_owner_spec(guard_perm).children_not_locked(guards),
self.push_level_owner_spec(guard_perm).nodes_locked(guards),
self.push_level_owner_spec(guard_perm).relate_region(regions),
Source

pub proof fn push_level_owner(tracked &mut self, tracked guard_perm: Tracked<GuardPerm<'rcu, C>>)

requires
old(self).inv(),
old(self).level > 1,
ensures
*self == old(self).push_level_owner_spec(guard_perm@),
Source

pub open spec fn pop_level_owner_spec(self) -> (Self, GuardPerm<'rcu, C>)

{
    let child = self.continuations[self.level - 1];
    let cont = self.continuations[self.level as int];
    let (new_cont, guard_perm) = cont.restore_spec(child);
    let new_continuations = self.continuations.insert(self.level as int, new_cont);
    let new_continuations = new_continuations.remove(self.level - 1);
    let new_level = (self.level + 1) as u8;
    let popped_too_high = if new_level >= self.guard_level { true } else { false };
    (
        Self {
            continuations: new_continuations,
            level: new_level,
            popped_too_high: popped_too_high,
            ..self
        },
        guard_perm,
    )
}
Source

pub proof fn pop_level_owner_preserves_inv(self)

requires
self.inv(),
self.level < NR_LEVELS,
ensures
self.pop_level_owner_spec().0.inv(),
Source

pub proof fn pop_level_owner_preserves_invs( self, guards: Guards<'rcu, C>, regions: MetaRegionOwners, )

requires
self.inv(),
self.level < NR_LEVELS,
self.children_not_locked(guards),
self.nodes_locked(guards),
self.relate_region(regions),
ensures
self.pop_level_owner_spec().0.inv(),
self.pop_level_owner_spec().0.only_current_locked(guards),
self.pop_level_owner_spec().0.nodes_locked(guards),
self.pop_level_owner_spec().0.relate_region(regions),
Source

pub proof fn set_va_preserves_inv(self, new_va: AbstractVaddr)

requires
self.inv(),
new_va.inv(),
forall |i: int| self.level - 1 <= i < NR_LEVELS ==> new_va.index[i] == self.va.index[i],
forall |i: int| {
    self.guard_level - 1 <= i < NR_LEVELS ==> new_va.index[i] == self.prefix.index[i]
},
ensures
self.set_va_spec(new_va).inv(),

Update va to a new value that shares the same indices at levels >= self.level. This preserves invariants because:

  1. The new va satisfies va.inv()
  2. The indices at levels >= level match the continuation indices
  3. in_locked_range/above_locked_range depend on va but the preconditions ensure consistency
Source

pub proof fn pop_level_owner(tracked &mut self) -> tracked guard_perm : GuardPerm<'rcu, C>

requires
old(self).inv(),
old(self).level < NR_LEVELS,
ensures
*self == old(self).pop_level_owner_spec().0,
guard_perm == old(self).pop_level_owner_spec().1,
Source

pub open spec fn move_forward_owner_spec(self) -> Self

recommends
self.inv(),
self.level < NR_LEVELS,
self.in_locked_range(),
{
    if self.index() + 1 < NR_ENTRIES {
        self.inc_index().zero_below_level()
    } else if self.level < NR_LEVELS {
        self.pop_level_owner_spec().0.move_forward_owner_spec()
    } else {
        Self {
            popped_too_high: false,
            ..self
        }
    }
}
Source

pub proof fn move_forward_increases_va(self)

requires
self.inv(),
self.level <= NR_LEVELS,
self.in_locked_range(),
ensures
self.move_forward_owner_spec().va.to_vaddr() > self.va.to_vaddr(),
Source

pub proof fn move_forward_not_popped_too_high(self)

requires
self.inv(),
self.level <= NR_LEVELS,
self.in_locked_range(),
ensures
!self.move_forward_owner_spec().popped_too_high,
Source

pub proof fn move_forward_owner_decreases_steps(self)

requires
self.inv(),
self.level <= NR_LEVELS,
ensures
self.move_forward_owner_spec().max_steps() < self.max_steps(),
Source

pub proof fn move_forward_va_is_align_up(self)

requires
self.inv(),
self.level <= NR_LEVELS,
ensures
self.move_forward_owner_spec().va == self.va.align_up(self.level as int),
Source

pub proof fn pop_level_owner_preserves_mappings(self)

requires
self.inv(),
self.level < NR_LEVELS,
self.in_locked_range(),
ensures
self.pop_level_owner_spec().0@.mappings == self@.mappings,

After popping a level, the total view_mappings is preserved. The restored parent at index self.level absorbs the child’s mappings, and both are within the view_mappings range [self.level, NR_LEVELS).

Source

pub proof fn move_forward_owner_preserves_mappings(self)

requires
self.inv(),
self.in_locked_range(),
ensures
self.move_forward_owner_spec()@.mappings == self@.mappings,
Source

pub proof fn move_forward_owner_preserves_in_locked_range(self)

requires
self.inv(),
self.level <= NR_LEVELS,
self.in_locked_range(),
ensures
self.move_forward_owner_spec().in_locked_range(),

After move_forward_owner_spec, the cursor remains within the locked range.

Source

pub proof fn move_forward_pop_loop_level_lt_guard(self, owner0: Self)

requires
self.inv(),
self.in_locked_range(),
self.level <= self.guard_level,
owner0.inv(),
owner0.in_locked_range(),
self.move_forward_owner_spec() == owner0.move_forward_owner_spec(),
!owner0.move_forward_owner_spec().popped_too_high,
ensures
self.level < self.guard_level,

After the pop loop in move_forward, the cursor level is strictly below guard_level.

The loop exits when level >= guard_level OR pte_index != 0. The level >= guard_level case is impossible: reaching guard_level via pop would set popped_too_high = true, but the loop invariant owner.move_forward_owner_spec() == owner0.move_forward_owner_spec() combined with move_forward_not_popped_too_high gives !owner.move_forward_owner_spec().popped_too_high, and move_forward_owner_spec() on a popped_too_high state gives a different result — a contradiction.

Source§

impl<'rcu, C: PageTableConfig> CursorOwner<'rcu, C>

Source

pub open spec fn node_unlocked( guards: Guards<'rcu, C>, ) -> FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>

{
    |owner: EntryOwner<C>, path: TreePath<NR_ENTRIES>| {
        owner.is_node() ==> guards.unlocked(owner.node.unwrap().meta_perm.addr())
    }
}
Source

pub open spec fn node_unlocked_except( guards: Guards<'rcu, C>, addr: usize, ) -> FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>

{
    |owner: EntryOwner<C>, path: TreePath<NR_ENTRIES>| {
        owner.is_node()
            ==> (owner.node.unwrap().meta_perm.addr() != addr
                ==> guards.unlocked(owner.node.unwrap().meta_perm.addr()))
    }
}
Source

pub open spec fn map_full_tree( self, f: FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>, ) -> bool

{
    forall |i: int| {
        self.level - 1 <= i < NR_LEVELS ==> { self.continuations[i].map_children(f) }
    }
}
Source

pub open spec fn map_only_children( self, f: FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>, ) -> bool

{
    forall |i: int| {
        self.level - 1 <= i < NR_LEVELS ==> self.continuations[i].map_children(f)
    }
}
Source

pub open spec fn children_not_locked(self, guards: Guards<'rcu, C>) -> bool

{ self.map_only_children(Self::node_unlocked(guards)) }
Source

pub open spec fn only_current_locked(self, guards: Guards<'rcu, C>) -> bool

{
    self.map_only_children(
        Self::node_unlocked_except(
            guards,
            self.cur_entry_owner().node.unwrap().meta_perm.addr(),
        ),
    )
}
Source

pub proof fn never_drop_restores_children_not_locked( self, guard: PageTableGuard<'rcu, C>, guards0: Guards<'rcu, C>, guards1: Guards<'rcu, C>, )

requires
self.inv(),
self.only_current_locked(guards0),
<PageTableGuard<'rcu, C> as TrackDrop>::constructor_requires(guard, guards0),
<PageTableGuard<'rcu, C> as TrackDrop>::constructor_ensures(guard, guards0, guards1),
ensures
self.children_not_locked(guards1),
Source

pub proof fn never_drop_restores_nodes_locked( self, guard: PageTableGuard<'rcu, C>, guards0: Guards<'rcu, C>, guards1: Guards<'rcu, C>, )

requires
self.inv(),
self.nodes_locked(guards0),
<PageTableGuard<'rcu, C> as TrackDrop>::constructor_requires(guard, guards0),
<PageTableGuard<'rcu, C> as TrackDrop>::constructor_ensures(guard, guards0, guards1),
forall |i: int| {
    self.level - 1 <= i < NR_LEVELS
        ==> self.continuations[i].guard_perm.value().inner.inner@.ptr.addr()
            != guard.inner.inner@.ptr.addr()
},
ensures
self.nodes_locked(guards1),

After dropping the guard for the popped level, nodes_locked is preserved for the new (higher-level) owner, because the dropped guard’s address is not among those checked by nodes_locked (which covers levels >= self.level - 1).

Source

pub proof fn protect_preserves_cursor_inv_relate( self, other: Self, regions: MetaRegionOwners, )

requires
self.inv(),
self.relate_region(regions),
self.cur_entry_owner().is_frame(),
other.cur_entry_owner().is_frame(),
other.cur_entry_owner().inv(),
other.cur_entry_owner().frame.unwrap().mapped_pa
    == self.cur_entry_owner().frame.unwrap().mapped_pa,
other.cur_entry_owner().frame.unwrap().slot_perm
    == self.cur_entry_owner().frame.unwrap().slot_perm,
other.cur_entry_owner().path == self.cur_entry_owner().path,
other.cur_entry_owner().parent_level == self.cur_entry_owner().parent_level,
self.level == other.level,
self.guard_level == other.guard_level,
self.va == other.va,
self.prefix == other.prefix,
self.popped_too_high == other.popped_too_high,
forall |i: int| {
    self.level <= i < NR_LEVELS
        ==> #[trigger] self.continuations[i] == other.continuations[i]
},
other.continuations[self.level - 1].inv(),
other.continuations[self.level - 1].all_some(),
ensures
other.inv(),
other.relate_region(regions),

After a protect operation that only modifies frame.prop of the current entry, CursorOwner::inv() and relate_region are preserved.

Safety: protect changes only frame.prop and updates parent.children_perm to match. EntryOwner::inv() is preserved (from protect postcondition). relate_region is preserved because it doesn’t use frame.prop. rel_children holds via match_pte (from protect’s wf/node_matching postconditions).

The axiom requires only the semantic properties of the modified entry that are checked by inv and relate_region; the structural identity of other continuations is trusted to hold from the tracked restore operations in the caller.

Source

pub proof fn map_children_implies( self, f: FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>, g: FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>, )

requires
self.inv(),
OwnerSubtree::implies(f, g),
forall |i: int| self.level - 1 <= i < NR_LEVELS ==> self.continuations[i].map_children(f),
ensures
forall |i: int| self.level - 1 <= i < NR_LEVELS ==> self.continuations[i].map_children(g),
Source

pub open spec fn nodes_locked(self, guards: Guards<'rcu, C>) -> bool

{
    forall |i: int| {
        self.level - 1 <= i < NR_LEVELS ==> { self.continuations[i].node_locked(guards) }
    }
}
Source

pub open spec fn index(self) -> usize

{ self.continuations[self.level - 1].idx }
Source

pub open spec fn inc_index(self) -> Self

{
    Self {
        continuations: self
            .continuations
            .insert(self.level - 1, self.continuations[self.level - 1].inc_index()),
        va: AbstractVaddr {
            offset: self.va.offset,
            index: self
                .va
                .index
                .insert(
                    self.level - 1,
                    self.continuations[self.level - 1].inc_index().idx as int,
                ),
        },
        popped_too_high: false,
        ..self
    }
}
Source

pub open spec fn zero_below_level_rec(self, level: PagingLevel) -> Self

{
    if self.level <= level {
        self
    } else {
        Self {
            va: AbstractVaddr {
                offset: self.va.offset,
                index: self.va.index.insert(level - 1, 0),
            },
            ..self.zero_below_level_rec((level + 1) as u8)
        }
    }
}
Source

pub open spec fn zero_below_level(self) -> Self

{ self.zero_below_level_rec(1u8) }
Source

pub proof fn zero_below_level_rec_preserves_above(self, level: PagingLevel)

ensures
forall |lv: int| {
    lv >= self.level
        ==> self.zero_below_level_rec(level).va.index[lv] == #[trigger] self.va.index[lv]
},
Source

pub proof fn zero_preserves_above(self)

ensures
forall |lv: int| {
    lv >= self.level
        ==> self.zero_below_level().va.index[lv] == #[trigger] self.va.index[lv]
},
Source

pub proof fn do_zero_below_level(tracked &mut self)

requires
old(self).inv(),
ensures
*self == old(self).zero_below_level(),
Source

pub proof fn do_inc_index(tracked &mut self)

requires
old(self).inv(),
old(self).continuations[old(self).level - 1].idx + 1 < NR_ENTRIES,
old(self).level == NR_LEVELS
    ==> (old(self).continuations[old(self).level - 1].idx + 1)
        < C::TOP_LEVEL_INDEX_RANGE_spec().end,
ensures
self.inv(),
*self == old(self).inc_index(),
Source

pub proof fn inc_and_zero_increases_va(self)

requires
self.inv(),
ensures
self.inc_index().zero_below_level().va.to_vaddr() > self.va.to_vaddr(),
Source

pub proof fn zero_rec_preserves_all_but_va(self, level: PagingLevel)

ensures
self.zero_below_level_rec(level).level == self.level,
self.zero_below_level_rec(level).continuations == self.continuations,
self.zero_below_level_rec(level).guard_level == self.guard_level,
self.zero_below_level_rec(level).prefix == self.prefix,
self.zero_below_level_rec(level).popped_too_high == self.popped_too_high,
Source

pub proof fn zero_preserves_all_but_va(self)

ensures
self.zero_below_level().level == self.level,
self.zero_below_level().continuations == self.continuations,
self.zero_below_level().guard_level == self.guard_level,
self.zero_below_level().prefix == self.prefix,
self.zero_below_level().popped_too_high == self.popped_too_high,
Source

pub open spec fn cur_va(self) -> Vaddr

{ self.va.to_vaddr() }
Source

pub open spec fn cur_va_range(self) -> Range<AbstractVaddr>

{
    let start = self.va.align_down(self.level as int);
    let end = self.va.align_up(self.level as int);
    Range { start, end }
}
Source

pub proof fn cur_va_range_reflects_view(self)

requires
self.inv(),
ensures
self.cur_va_range().start.reflect(self@.query_range().start),
self.cur_va_range().end.reflect(self@.query_range().end),
Source

pub proof fn cur_va_in_subtree_range(self)

requires
self.inv(),
ensures
vaddr(self.cur_subtree().value.path) <= self.cur_va()
    < vaddr(self.cur_subtree().value.path) + page_size(self.level as PagingLevel),

The current virtual address falls within the VA range of the current subtree’s path.

Source

pub proof fn cur_subtree_eq_filtered_mappings(self)

requires
self.inv(),
ensures
PageTableOwner(self.cur_subtree())@.mappings
    == self@
        .mappings
        .filter(|m: Mapping| (
            self@.cur_va <= m.va_range.start < self@.cur_va + page_size(self.level)
        )),

The current subtree’s mappings equal the filter over [cur_va, cur_va + page_size(level)).

Source

pub proof fn subtree_va_ranges_disjoint(self, j: int)

requires
self.inv(),
0 <= j < NR_ENTRIES,
j != self.index(),
self.continuations[self.level - 1].children[j] is Some,
ensures
vaddr(self.continuations[self.level - 1].path().push_tail(j as usize))
    + page_size(self.level as PagingLevel) <= self.cur_va()
    || self.cur_va()
        < vaddr(self.continuations[self.level - 1].path().push_tail(j as usize)),

Subtrees at different indices have disjoint VA ranges.

Source

pub proof fn higher_level_children_disjoint(self, i: int, j: int)

requires
self.inv(),
self.level - 1 < i < NR_LEVELS,
0 <= j < NR_ENTRIES,
j != self.continuations[i].idx,
self.continuations[i].children[j] is Some,
ensures
vaddr(self.continuations[i].path().push_tail(j as usize))
    + page_size((i + 1) as PagingLevel) <= self.cur_va()
    || self.cur_va() < vaddr(self.continuations[i].path().push_tail(j as usize)),

Children of higher-level continuations have VA ranges that don’t include cur_va, because cur_va’s indices at those levels match the path to the current position.

Source

pub proof fn mapping_covering_cur_va_from_cur_subtree(self, m: Mapping)

requires
self.inv(),
self.view_mappings().contains(m),
m.va_range.start <= self.cur_va() < m.va_range.end,
ensures
PageTableOwner(self.cur_subtree()).view_rec(self.cur_subtree().value.path).contains(m),

Any mapping that covers cur_va must come from the current subtree. This follows from the disjointness of VA ranges and the fact that cur_va falls within the current subtree’s VA range.

Source

pub proof fn inv_continuation(self, i: int)

requires
self.inv(),
self.level - 1 <= i <= NR_LEVELS - 1,
ensures
self.continuations.contains_key(i),
self.continuations[i].inv(),
self.continuations[i].children.len() == NR_ENTRIES,
Source

pub open spec fn view_mappings(self) -> Set<Mapping>

{
    Set::new(|m: Mapping| {
        exists |i: int| {
            self.level - 1 <= i < NR_LEVELS
                && self.continuations[i].view_mappings().contains(m)
        }
    })
}
Source

pub proof fn view_mappings_take_lowest(self, new: Self)

requires
self.inv(),
new.continuations == self.continuations.remove(self.level - 1),
ensures
new.view_mappings()
    == self.view_mappings() - self.continuations[self.level - 1].view_mappings(),
Source

pub proof fn view_mappings_put_lowest( self, new: Self, cont: CursorContinuation<'_, C>, )

requires
cont.inv(),
new.inv(),
new.continuations == self.continuations.insert(self.level - 1, cont),
ensures
new.view_mappings() == self.view_mappings() + cont.view_mappings(),
Source

pub open spec fn as_page_table_owner(self) -> PageTableOwner<C>

{
    if self.level == 1 {
        let l1 = self.continuations[0];
        let l2 = self.continuations[1].restore_spec(l1).0;
        let l3 = self.continuations[2].restore_spec(l2).0;
        let l4 = self.continuations[3].restore_spec(l3).0;
        l4.as_page_table_owner()
    } else if self.level == 2 {
        let l2 = self.continuations[1];
        let l3 = self.continuations[2].restore_spec(l2).0;
        let l4 = self.continuations[3].restore_spec(l3).0;
        l4.as_page_table_owner()
    } else if self.level == 3 {
        let l3 = self.continuations[2];
        let l4 = self.continuations[3].restore_spec(l3).0;
        l4.as_page_table_owner()
    } else {
        let l4 = self.continuations[3];
        l4.as_page_table_owner()
    }
}
Source

pub proof fn as_page_table_owner_preserves_view_mappings(self)

requires
self.inv(),
ensures
self.as_page_table_owner().view_rec(self.continuations[3].path()) == self.view_mappings(),
Source

pub open spec fn cur_entry_owner(self) -> EntryOwner<C>

{ self.cur_subtree().value }
Source

pub open spec fn cur_subtree(self) -> OwnerSubtree<C>

{ self.continuations[self.level - 1].children[self.index() as int].unwrap() }
Source

pub proof fn borrow_cur_frame_slot_perm(tracked &self) -> tracked res : &PointsTo<MetaSlot>

requires
self.cur_entry_owner().is_frame(),
ensures
*res == self.cur_entry_owner().frame.unwrap().slot_perm,

Borrows the slot permission from the current frame entry owner.

This is an axiom because expressing the structural borrow through the nested Map/Seq/Option layers is not yet supported directly in Verus. The axiom is safe: it only provides a shared reference to data already logically owned by self, and the borrow cannot outlive self.

Source

pub proof fn cur_frame_clone_requires( self, item: C::Item, pa: Paddr, level: PagingLevel, prop: PageProperty, regions: MetaRegionOwners, )

requires
self.inv(),
self.relate_region(regions),
self.cur_entry_owner().is_frame(),
pa == self.cur_entry_owner().frame.unwrap().mapped_pa,
C::item_from_raw_spec(pa, level, prop) == item,
ensures
item
    .clone_requires(
        self.cur_entry_owner().frame.unwrap().slot_perm,
        regions.slot_owners[frame_to_index(pa)].inner_perms.ref_count,
    ),

Axiom: the item reconstructed from the current frame’s physical address satisfies clone_requires.

Safety: When relate_region holds for a frame entry, the item reconstructed via item_from_raw_spec(pa, ...) is the original frame item. The frame’s slot permission (owned by the cursor) has the correct address, is initialised, and its ref count is in the valid clonable range (> 0, < REF_COUNT_MAX), so clone_requires is satisfied.

Source

pub proof fn clone_item_preserves_invariants( self, old_regions: MetaRegionOwners, new_regions: MetaRegionOwners, idx: usize, )

requires
self.inv(),
self.relate_region(old_regions),
old_regions.inv(),
self.cur_entry_owner().is_frame(),
idx == frame_to_index(self.cur_entry_owner().frame.unwrap().mapped_pa),
old_regions.slot_owners.contains_key(idx),
new_regions.slot_owners.contains_key(idx),
new_regions.slot_owners[idx].inner_perms.ref_count.value()
    == old_regions.slot_owners[idx].inner_perms.ref_count.value() + 1,
new_regions.slot_owners[idx].inner_perms.ref_count.id()
    == old_regions.slot_owners[idx].inner_perms.ref_count.id(),
new_regions.slot_owners[idx].inner_perms.storage
    == old_regions.slot_owners[idx].inner_perms.storage,
new_regions.slot_owners[idx].inner_perms.vtable_ptr
    == old_regions.slot_owners[idx].inner_perms.vtable_ptr,
new_regions.slot_owners[idx].inner_perms.in_list
    == old_regions.slot_owners[idx].inner_perms.in_list,
new_regions.slot_owners[idx].path_if_in_pt == old_regions.slot_owners[idx].path_if_in_pt,
new_regions.slot_owners[idx].self_addr == old_regions.slot_owners[idx].self_addr,
new_regions.slot_owners[idx].raw_count == old_regions.slot_owners[idx].raw_count,
new_regions.slot_owners[idx].usage == old_regions.slot_owners[idx].usage,
new_regions.slot_owners.dom() == old_regions.slot_owners.dom(),
forall |i: usize| {
    i != idx && old_regions.slot_owners.contains_key(i)
        ==> new_regions.slot_owners[i] == old_regions.slot_owners[i]
},
new_regions.slots == old_regions.slots,
0 < old_regions.slot_owners[idx].inner_perms.ref_count.value(),
old_regions.slot_owners[idx].inner_perms.ref_count.value() + 1 < REF_COUNT_MAX,
ensures
new_regions.inv(),
self.relate_region(new_regions),

Incrementing the ref count of the current frame preserves regions.inv() and self.relate_region(new_regions).

Source

pub proof fn cur_entry_node_implies_level_gt_1(self)

requires
self.inv(),
self.cur_entry_owner().is_node(),
ensures
self.level > 1,

If the current entry is a page table node, the cursor must be at level >= 2.

Proof: the current child subtree has is_node(), so by the ghost-tree la_inv (is_node() ==> tree_level < INC_LEVELS - 1), its tree level satisfies INC_LEVELS - self.level < INC_LEVELS - 1, i.e., self.level > 1.

Source

pub proof fn frame_not_fits_implies_level_gt_1( self, cur_entry_fits_range: bool, cur_va: Vaddr, end: Vaddr, )

requires
self.inv(),
self.cur_entry_owner().is_frame(),
!cur_entry_fits_range,
cur_va < end,
cur_entry_fits_range
    == (cur_va == self.cur_va_range().start.to_vaddr()
        && self.cur_va_range().end.to_vaddr() <= end),
cur_va as nat % PAGE_SIZE as nat == 0,
end as nat % PAGE_SIZE as nat == 0,
ensures
self.level > 1,

A frame entry at the cursor’s current level that doesn’t fit the aligned range [cur_va, end) must be at level > 1. Justification: At level 1, page_size(1) == BASE_PAGE_SIZE. Since the cursor VA and end are BASE_PAGE_SIZE-aligned and cur_va < end, we have cur_va + page_size(1) <= end, so a level-1 frame always fits. Therefore !cur_entry_fits_range implies level > 1.

Source

pub proof fn split_while_huge_node_noop(self)

requires
self.inv(),
self.cur_entry_owner().is_node(),
self.level > 1,
ensures
self@.split_while_huge(page_size((self.level - 1) as PagingLevel)) == self@,
Source

pub proof fn split_while_huge_absent_noop(self, size: usize)

requires
self.inv(),
self.cur_entry_owner().is_absent(),
ensures
self@.split_while_huge(size) == self@,

When the current entry is absent, there is no mapping at cur_va in the abstract view, so split_while_huge finds nothing to split and is a no-op for any target size.

Source

pub proof fn split_while_huge_at_level_noop(self)

requires
self.inv(),
ensures
self@.split_while_huge(page_size(self.level as PagingLevel)) == self@,
Source

pub proof fn new_child_mappings_eq_target( self, new_subtree: OwnerSubtree<C>, pa: Paddr, level: PagingLevel, prop: PageProperty, )

requires
self.inv(),
level == self.level,
new_subtree.inv(),
new_subtree.value.is_frame(),
new_subtree.value.path
    == self
        .continuations[self.level as int - 1]
        .path()
        .push_tail(self.continuations[self.level as int - 1].idx as usize),
new_subtree.value.frame.unwrap().mapped_pa == pa,
new_subtree.value.frame.unwrap().prop == prop,
ensures
PageTableOwner(new_subtree)@.mappings
    == set![
        Mapping { va_range : self @.cur_slot_range(page_size(level)), pa_range : pa.. (pa
        + page_size(level)) as usize, page_size : page_size(level), property : prop, }
    ],

A new frame subtree at the current position has mappings equal to the singleton mapping covering the current slot range.

Source

pub proof fn map_branch_none_inv_holds(self, owner0: Self)

requires
owner0.inv(),
self.level == owner0.level,
self.va == owner0.va,
self.guard_level == owner0.guard_level,
self.prefix == owner0.prefix,
self.popped_too_high == owner0.popped_too_high,
forall |i: int| {
    self.level <= i < NR_LEVELS
        ==> #[trigger] self.continuations[i] == owner0.continuations[i]
},
self.continuations[self.level - 1].inv(),
self.continuations[self.level - 1].all_some(),
self.continuations[self.level - 1].idx == owner0.continuations[owner0.level - 1].idx,
self.continuations[self.level - 1].entry_own.parent_level
    == owner0.continuations[owner0.level - 1].entry_own.parent_level,
self.continuations[self.level - 1].guard_perm.value().inner.inner@.ptr.addr()
    == owner0.continuations[owner0.level - 1].guard_perm.value().inner.inner@.ptr.addr(),
self.continuations[self.level - 1].path()
    == owner0.continuations[owner0.level - 1].path(),
self.va.index[self.level - 1] == self.continuations[self.level - 1].idx,
self.continuations.dom() =~= owner0.continuations.dom(),
ensures
self.inv(),

After alloc_if_none (absent→node) + restore, the cursor invariant holds.

Source

pub proof fn map_branch_none_path_tracked_holds( self, owner0: Self, regions: MetaRegionOwners, old_regions: MetaRegionOwners, )

requires
owner0.relate_region(old_regions),
self.inv(),
self.level == owner0.level,
forall |i: int| {
    self.level <= i < NR_LEVELS
        ==> #[trigger] self.continuations[i] == owner0.continuations[i]
},
Entry::<C>::path_tracked_pred_preserved(old_regions, regions),
self
    .continuations[self.level - 1]
    .map_children(PageTableOwner::<C>::path_tracked_pred(regions)),
ensures
self.map_full_tree(PageTableOwner::<C>::path_tracked_pred(regions)),

After alloc_if_none (absent→node), path_tracked_pred transfers via map_children_lift.

Source

pub proof fn map_branch_none_no_new_mappings(self, owner0: Self)

requires
owner0.inv(),
self.inv(),
self.level == owner0.level,
self.va == owner0.va,
forall |i: int| {
    self.level <= i < NR_LEVELS
        ==> #[trigger] self.continuations[i] == owner0.continuations[i]
},
owner0
    .continuations[owner0.level - 1]
    .children[owner0.continuations[owner0.level - 1].idx as int] is Some,
owner0
    .continuations[owner0.level - 1]
    .children[owner0.continuations[owner0.level - 1].idx as int]
    .unwrap()
    .value
    .is_absent(),
self
    .continuations[self.level - 1]
    .children[self.continuations[self.level - 1].idx as int] is Some,
self
    .continuations[self.level - 1]
    .children[self.continuations[self.level - 1].idx as int]
    .unwrap()
    .value
    .is_node(),
self.continuations[self.level - 1].path()
    == owner0.continuations[owner0.level - 1].path(),
forall |j: int| {
    0 <= j < NR_ENTRIES && j != owner0.continuations[owner0.level - 1].idx as int
        ==> #[trigger] self.continuations[self.level - 1].children[j]
            == owner0.continuations[owner0.level - 1].children[j]
},
PageTableOwner(
        self
            .continuations[self.level - 1]
            .children[self.continuations[self.level - 1].idx as int]
            .unwrap(),
    )
    .view_rec(
        self
            .continuations[self.level - 1]
            .path()
            .push_tail(self.continuations[self.level - 1].idx as usize),
    ) =~= Set::<Mapping>::empty(),
ensures
self.view_mappings() =~= owner0.view_mappings(),

After alloc_if_none (absent→node), view_mappings is unchanged (both contribute zero mappings).

Source

pub proof fn map_branch_none_cur_entry_absent(self)

requires
self.inv(),
forall |i: int| {
    0 <= i < NR_ENTRIES
        ==> #[trigger] self.continuations[self.level - 1].children[i] is Some
            && self.continuations[self.level - 1].children[i].unwrap().value.is_absent()
},
ensures
self.cur_entry_owner().is_absent(),

After map_branch_none (alloc_if_none + push_level), the current entry is absent.

Proof: alloc_if_none creates an empty PT node where all children are absent (allocated_empty_node_owner line 172). push_level enters one of these children, so cur_entry_owner().is_absent() holds.

Source

pub proof fn map_branch_frame_split_while_huge( self, owner0: Self, owner_before_frame: Self, level_before_frame: int, )

requires
self.inv(),
owner0.inv(),
owner_before_frame.inv(),
1 <= level_before_frame - 1,
level_before_frame <= NR_LEVELS,
self.level == (level_before_frame - 1) as u8,
owner_before_frame@
    == owner0@.split_while_huge(page_size(level_before_frame as PagingLevel)),
self@
    == owner_before_frame@
        .split_if_mapped_huge_spec(page_size((level_before_frame - 1) as PagingLevel)),
ensures
self@ == owner0@.split_while_huge(page_size(self.level as PagingLevel)),

After map_branch_frame splits a huge frame at level level_before_frame and descends, the cursor view equals owner0@.split_while_huge(page_size(level_before_frame - 1)).

Chain: owner@ = owner_before_frame@.split_if_mapped_huge_spec(page_size(level_before_frame - 1)) = owner0@.split_while_huge(page_size(level_before_frame)).split_if_mapped_huge_spec(…) = owner0@.split_while_huge(page_size(level_before_frame - 1)) The last equality uses the fact that split_while_huge(L) on a frame of size page_size(L) takes exactly one split step to page_size(L-1), matching split_if_mapped_huge_spec.

Source

pub proof fn find_next_split_push_equals_split_while_huge( self, old_view: CursorView<C>, )

requires
self.inv(),
self.cur_entry_owner().is_frame(),
self@.cur_va == old_view.cur_va,
old_view.present(),
self@.mappings
    =~= old_view.split_if_mapped_huge_spec(page_size(self.level as PagingLevel)).mappings,
ensures
self@.mappings
    =~= old_view.split_while_huge(page_size(self.level as PagingLevel)).mappings,

After split_if_mapped_huge + push_level, the mappings equal old_view.split_while_huge(page_size(current_level)).

Source

pub proof fn split_while_huge_cur_va_independent( v1: CursorView<C>, v2: CursorView<C>, size: usize, )

requires
v1.inv(),
v2.inv(),
v1.mappings =~= v2.mappings,
v1.cur_va <= v2.cur_va,
v1
    .mappings
    .filter(|m: Mapping| v1.cur_va <= m.va_range.start && m.va_range.start < v2.cur_va)
    =~= Set::<Mapping>::empty(),
!v1.present() && v2.present() ==> v2.query_mapping().page_size <= size,
ensures
v1.split_while_huge(size).mappings =~= v2.split_while_huge(size).mappings,

split_while_huge gives the same mappings for two cur_va values when no mapping starts between them and the !present case is a no-op.

Source

pub open spec fn locked_range(self) -> Range<Vaddr>

{
    let start = self.prefix.align_down(self.guard_level as int).to_vaddr();
    let end = self.prefix.align_up(self.guard_level as int).to_vaddr();
    Range { start, end }
}
Source

pub open spec fn in_locked_range(self) -> bool

{ self.locked_range().start <= self.va.to_vaddr() < self.locked_range().end }
Source

pub open spec fn above_locked_range(self) -> bool

{ self.va.to_vaddr() >= self.locked_range().end }
Source

pub proof fn prefix_in_locked_range(self)

requires
forall |i: int| i >= self.guard_level ==> self.va.index[i] == self.prefix.index[i],
ensures
self.in_locked_range(),
Source

pub proof fn in_locked_range_level_lt_nr_levels(self)

requires
self.inv(),
self.in_locked_range(),
!self.popped_too_high,
ensures
self.level < NR_LEVELS,
Source

pub proof fn in_locked_range_level_lt_guard_level(self)

requires
self.inv(),
self.in_locked_range(),
!self.popped_too_high,
ensures
self.level < self.guard_level,
Source

pub proof fn node_within_locked_range(self, level: PagingLevel)

requires
self.in_locked_range(),
1 <= level < self.guard_level,
self.va.inv(),
ensures
self.locked_range().start
    <= nat_align_down(
        self.va.to_vaddr() as nat,
        page_size((level + 1) as PagingLevel) as nat,
    ) as usize,
nat_align_down(self.va.to_vaddr() as nat, page_size((level + 1) as PagingLevel) as nat)
    as usize + page_size((level + 1) as PagingLevel) <= self.locked_range().end,

The node at level+1 containing va fits within the locked range.

Source

pub proof fn locked_range_page_aligned(self)

requires
self.inv(),
ensures
self.locked_range().end % PAGE_SIZE == 0,
self.locked_range().start % PAGE_SIZE == 0,
Source

pub proof fn cur_subtree_inv(self)

requires
self.inv(),
ensures
self.cur_subtree().inv(),
Source

pub proof fn cur_entry_absent_not_present(self)

requires
self.inv(),
self.cur_entry_owner().is_absent(),
ensures
!self@.present(),

If the current entry is absent, !self@.present().

Source

pub proof fn cur_subtree_empty_not_present(self)

requires
self.inv(),
PageTableOwner(self.cur_subtree()).view_rec(self.cur_subtree().value.path) =~= set![],
ensures
!self@.present(),

Generalises cur_entry_absent_not_present to any empty subtree.

Source

pub proof fn cur_entry_frame_present(self)

requires
self.inv(),
self.cur_entry_owner().is_frame(),
ensures
self@.present(),
self@
    .query(
        self.cur_entry_owner().frame.unwrap().mapped_pa,
        self.cur_entry_owner().frame.unwrap().size,
        self.cur_entry_owner().frame.unwrap().prop,
    ),
Source

pub open spec fn relate_region(self, regions: MetaRegionOwners) -> bool

{
    &&& self
        .map_full_tree(|entry_owner: EntryOwner<C>, path: TreePath<NR_ENTRIES>| {
            entry_owner.relate_region(regions)
        })
    &&& self.map_full_tree(PageTableOwner::<C>::path_tracked_pred(regions))

}
Source

pub open spec fn not_in_tree(self, owner: EntryOwner<C>) -> bool

{
    self.map_full_tree(|owner0: EntryOwner<C>, path: TreePath<NR_ENTRIES>| {
        owner0.meta_slot_paddr_neq(owner)
    })
}
Source

pub proof fn absent_not_in_tree(self, owner: EntryOwner<C>)

requires
self.inv(),
owner.inv(),
owner.is_absent(),
ensures
self.not_in_tree(owner),
Source

pub proof fn not_in_tree_from_not_mapped( self, regions: MetaRegionOwners, new_entry: EntryOwner<C>, )

requires
self.inv(),
self.relate_region(regions),
new_entry.meta_slot_paddr() is Some,
regions
    .slot_owners[frame_to_index(new_entry.meta_slot_paddr().unwrap())]
    .path_if_in_pt is None,
ensures
self.not_in_tree(new_entry),

If the cursor owner’s tree satisfies relate_region(regions), and a new entry’s physical address is not currently tracked in the page table (path_if_in_pt is None), then no existing entry in the tree has the same physical address as the new entry.

This lemma encapsulates the map_children_implies proof for not_in_tree, factored out so it runs in its own Z3 context (avoiding rlimit issues when called from large functions).

Source

pub proof fn relate_region_preserved( self, other: Self, regions0: MetaRegionOwners, regions1: MetaRegionOwners, )

requires
self.inv(),
self.relate_region(regions0),
self.level == other.level,
self.continuations =~= other.continuations,
OwnerSubtree::implies(
    PageTableOwner::<C>::relate_region_pred(regions0),
    PageTableOwner::<C>::relate_region_pred(regions1),
),
OwnerSubtree::implies(
    PageTableOwner::<C>::path_tracked_pred(regions0),
    PageTableOwner::<C>::path_tracked_pred(regions1),
),
ensures
other.relate_region(regions1),
Source

pub proof fn relate_region_slot_owners_preserved( self, regions0: MetaRegionOwners, regions1: MetaRegionOwners, )

requires
self.inv(),
self.relate_region(regions0),
regions0.slot_owners =~= regions1.slot_owners,
ensures
self.relate_region(regions1),

Transfers relate_region when slot_owners is preserved.

Source

pub proof fn relate_region_slot_owners_rc_increment( self, regions0: MetaRegionOwners, regions1: MetaRegionOwners, idx: usize, )

requires
self.inv(),
self.relate_region(regions0),
regions0.inv(),
regions1.slots == regions0.slots,
regions1.slot_owners.dom() == regions0.slot_owners.dom(),
regions1.slot_owners[idx].inner_perms.ref_count.value()
    == regions0.slot_owners[idx].inner_perms.ref_count.value() + 1,
regions1.slot_owners[idx].inner_perms.ref_count.id()
    == regions0.slot_owners[idx].inner_perms.ref_count.id(),
regions1.slot_owners[idx].inner_perms.storage
    == regions0.slot_owners[idx].inner_perms.storage,
regions1.slot_owners[idx].inner_perms.vtable_ptr
    == regions0.slot_owners[idx].inner_perms.vtable_ptr,
regions1.slot_owners[idx].inner_perms.in_list
    == regions0.slot_owners[idx].inner_perms.in_list,
regions1.slot_owners[idx].path_if_in_pt == regions0.slot_owners[idx].path_if_in_pt,
regions1.slot_owners[idx].self_addr == regions0.slot_owners[idx].self_addr,
regions1.slot_owners[idx].raw_count == regions0.slot_owners[idx].raw_count,
regions1.slot_owners[idx].usage == regions0.slot_owners[idx].usage,
regions1.slot_owners[idx].inner_perms.ref_count.value() != REF_COUNT_UNUSED,
forall |i: usize| {
    i != idx && regions0.slot_owners.contains_key(i)
        ==> regions1.slot_owners[i] == regions0.slot_owners[i]
},
ensures
self.relate_region(regions1),
Source

pub proof fn relate_region_borrow_slot( self, regions0: MetaRegionOwners, regions1: MetaRegionOwners, changed_idx: usize, )

requires
self.inv(),
self.relate_region(regions0),
regions1.inv(),
regions0.slot_owners[changed_idx].raw_count == 0,
regions1.slot_owners[changed_idx].raw_count == 1,
regions1.slot_owners[changed_idx].inner_perms
    == regions0.slot_owners[changed_idx].inner_perms,
regions1.slot_owners[changed_idx].self_addr
    == regions0.slot_owners[changed_idx].self_addr,
regions1.slot_owners[changed_idx].usage == regions0.slot_owners[changed_idx].usage,
regions1.slot_owners[changed_idx].path_if_in_pt
    == regions0.slot_owners[changed_idx].path_if_in_pt,
forall |i: usize| i != changed_idx ==> regions0.slot_owners[i] == regions1.slot_owners[i],
regions0.slot_owners.dom() =~= regions1.slot_owners.dom(),
ensures
self.relate_region(regions1),

Transfers relate_region when raw_count changed from 0 to 1 at one index. Uses map_implies_and with not_in_scope_pred since tree entries have !in_scope.

Source

pub proof fn cont_entries_relate_region(self, regions: MetaRegionOwners)

requires
self.inv(),
self.relate_region(regions),
ensures
forall |i: int| {
    self.level - 1 <= i < NR_LEVELS
        ==> {
            &&& self.continuations[i].entry_own.relate_region(regions)
            &&& PageTableOwner::<
                C,
            >::path_tracked_pred(
                regions,
            )(self.continuations[i].entry_own, self.continuations[i].path())

        }
},

Continuation entry_owns satisfy relate_region and path_tracked_pred.

§Justification

When the cursor descends into a subtree, each continuation’s entry_own was previously checked by tree_predicate_map in the parent’s child subtree. After descent, map_full_tree only covers the siblings (the taken child is None), so the path entries’ properties are no longer covered by map_full_tree. However, regions is unchanged since descent, so the properties still hold.

Source

pub proof fn cursor_path_nesting(self, i: int, j: int)

requires
self.inv(),
self.level - 1 <= j < i,
i < NR_LEVELS,
ensures
self.continuations[j].path().len() as int > self.continuations[i].path().len() as int,
self.continuations[j].path().index(self.continuations[i].path().len() as int)
    == self.continuations[i].idx,

Cursor path nesting: for two continuations where i > j >= self.level - 1, cont_i is an ancestor of cont_j in the page table tree. The path from the root to cont_j passes through cont_i.idx at level cont_i, i.e., cont_j.path()[cont_i.path().len()] == cont_i.idx.

This holds because the cursor was built by descending through cont_i.idx at each level.

Source

pub open spec fn set_va_spec(self, new_va: AbstractVaddr) -> Self

{ Self { va: new_va, ..self } }
Source

pub proof fn set_va(tracked &mut self, new_va: AbstractVaddr)

requires
forall |i: int| {
    old(self).level - 1 <= i < NR_LEVELS ==> new_va.index[i] == old(self).va.index[i]
},
forall |i: int| {
    old(self).guard_level - 1 <= i < NR_LEVELS
        ==> new_va.index[i] == old(self).prefix.index[i]
},
ensures
*self == old(self).set_va_spec(new_va),
Source

pub open spec fn set_va_in_node_spec(self, new_va: AbstractVaddr) -> Self

{
    let old_cont = self.continuations[self.level - 1];
    Self {
        va: new_va,
        continuations: self
            .continuations
            .insert(
                self.level - 1,
                CursorContinuation {
                    idx: new_va.index[self.level - 1] as usize,
                    ..old_cont
                },
            ),
        ..self
    }
}
Source

pub proof fn set_va_in_node(tracked &mut self, new_va: AbstractVaddr)

requires
old(self).inv(),
new_va.inv(),
forall |i: int| {
    old(self).level <= i < NR_LEVELS ==> new_va.index[i] == old(self).va.index[i]
},
old(self).locked_range().start <= new_va.to_vaddr() < old(self).locked_range().end,
ensures
*self == old(self).set_va_in_node_spec(new_va),
self.inv(),

When jumping within the same page-table node, only indices at levels >= level are guaranteed to match. The entry-within-node index (level - 1) may change, so we update continuations[level-1].idx along with va.

Source

pub open spec fn new_spec( owner_subtree: OwnerSubtree<C>, idx: usize, guard_perm: GuardPerm<'rcu, C>, ) -> Self

{
    let va = AbstractVaddr {
        offset: 0,
        index: Map::new(|i: int| 0 <= i < NR_LEVELS, |i: int| 0)
            .insert(NR_LEVELS - 1, idx as int),
    };
    Self {
        level: NR_LEVELS as PagingLevel,
        continuations: Map::empty()
            .insert(
                NR_LEVELS - 1 as int,
                CursorContinuation::new_spec(owner_subtree, idx, guard_perm),
            ),
        va,
        guard_level: NR_LEVELS as PagingLevel,
        prefix: va,
        popped_too_high: false,
    }
}
Source

pub proof fn new(tracked owner_subtree: OwnerSubtree<C>, idx: usize, tracked guard_perm: GuardPerm<'rcu, C>, ) -> tracked res : Self

ensures
res == Self::new_spec(owner_subtree, idx, guard_perm),
Source

pub proof fn jump_above_locked_range_va_in_node(self, va: Vaddr, node_start: Vaddr)

requires
self.inv(),
self.level == self.guard_level,
self.above_locked_range(),
self.locked_range().start <= va < self.locked_range().end,
node_start
    == nat_align_down(
        self.va.to_vaddr() as nat,
        page_size((self.level + 1) as PagingLevel) as nat,
    ) as usize,
ensures
node_start <= va,
va < node_start + page_size((self.level + 1) as PagingLevel),
Source

pub proof fn jump_not_in_node_level_lt_guard_minus_one( self, level: PagingLevel, va: Vaddr, node_start: Vaddr, )

requires
self.inv(),
self.locked_range().start <= va < self.locked_range().end,
1 <= level,
level + 1 <= self.guard_level,
self.locked_range().start <= node_start,
node_start + page_size((level + 1) as PagingLevel) <= self.locked_range().end,
!(node_start <= va && va < node_start + page_size((level + 1) as PagingLevel)),
ensures
level + 1 < self.guard_level,

Trait Implementations§

Source§

impl<'rcu, C: PageTableConfig> Inv for CursorOwner<'rcu, C>

Source§

open spec fn inv(self) -> bool

{
    &&& self.va.inv()
    &&& 1 <= self.level <= NR_LEVELS
    &&& self.guard_level <= NR_LEVELS
    &&& C::TOP_LEVEL_INDEX_RANGE_spec().start <= self.va.index[NR_LEVELS - 1]
    &&& self.va.index[NR_LEVELS - 1] < C::TOP_LEVEL_INDEX_RANGE_spec().end
    &&& self.in_locked_range() || self.above_locked_range()
    &&& self.popped_too_high ==> self.level >= self.guard_level && self.in_locked_range()
    &&& !self.popped_too_high
        ==> self.level < self.guard_level || self.above_locked_range()
    &&& self.continuations[self.level - 1].all_some()
    &&& forall |i: int| {
        self.level <= i < NR_LEVELS
            ==> { (#[trigger] self.continuations[i]).all_but_index_some() }
    }
    &&& self.prefix.inv()
    &&& forall |i: int| i < self.guard_level ==> self.prefix.index[i] == 0
    &&& self.level <= self.guard_level
        ==> forall |i: int| {
            self.guard_level <= i < NR_LEVELS
                ==> self.va.index[i] == self.prefix.index[i]
        }
    &&& self.level <= 4
        ==> {
            &&& self.continuations.contains_key(3)
            &&& self.continuations[3].inv()
            &&& self.continuations[3].level() == 4
            &&& self.continuations[3].entry_own.parent_level == 5
            &&& self.va.index[3] == self.continuations[3].idx

        }
    &&& self.level <= 3
        ==> {
            &&& self.continuations.contains_key(2)
            &&& self.continuations[2].inv()
            &&& self.continuations[2].level() == 3
            &&& self.continuations[2].entry_own.parent_level == 4
            &&& self.va.index[2] == self.continuations[2].idx
            &&& self.continuations[2].guard_perm.value().inner.inner@.ptr.addr()
                != self.continuations[3].guard_perm.value().inner.inner@.ptr.addr()
            &&& self.continuations[2].path()
                == self
                    .continuations[3]
                    .path()
                    .push_tail(self.continuations[3].idx as usize)

        }
    &&& self.level <= 2
        ==> {
            &&& self.continuations.contains_key(1)
            &&& self.continuations[1].inv()
            &&& self.continuations[1].level() == 2
            &&& self.continuations[1].entry_own.parent_level == 3
            &&& self.va.index[1] == self.continuations[1].idx
            &&& self.continuations[1].guard_perm.value().inner.inner@.ptr.addr()
                != self.continuations[2].guard_perm.value().inner.inner@.ptr.addr()
            &&& self.continuations[1].guard_perm.value().inner.inner@.ptr.addr()
                != self.continuations[3].guard_perm.value().inner.inner@.ptr.addr()
            &&& self.continuations[1].path()
                == self
                    .continuations[2]
                    .path()
                    .push_tail(self.continuations[2].idx as usize)

        }
    &&& self.level == 1
        ==> {
            &&& self.continuations.contains_key(0)
            &&& self.continuations[0].inv()
            &&& self.continuations[0].level() == 1
            &&& self.continuations[0].entry_own.parent_level == 2
            &&& self.va.index[0] == self.continuations[0].idx
            &&& self.continuations[0].guard_perm.value().inner.inner@.ptr.addr()
                != self.continuations[1].guard_perm.value().inner.inner@.ptr.addr()
            &&& self.continuations[0].guard_perm.value().inner.inner@.ptr.addr()
                != self.continuations[2].guard_perm.value().inner.inner@.ptr.addr()
            &&& self.continuations[0].guard_perm.value().inner.inner@.ptr.addr()
                != self.continuations[3].guard_perm.value().inner.inner@.ptr.addr()
            &&& self.continuations[0].path()
                == self
                    .continuations[1]
                    .path()
                    .push_tail(self.continuations[1].idx as usize)

        }

}
Source§

impl<'rcu, C: PageTableConfig> InvView for CursorOwner<'rcu, C>

Source§

impl<'rcu, C: PageTableConfig> View for CursorOwner<'rcu, C>

Source§

open spec fn view(&self) -> Self::V

{
    CursorView {
        cur_va: self.cur_va(),
        mappings: self.view_mappings(),
        phantom: PhantomData,
    }
}
Source§

type V = CursorView<C>

Auto Trait Implementations§

§

impl<'rcu, C> Freeze for CursorOwner<'rcu, C>

§

impl<'rcu, C> !RefUnwindSafe for CursorOwner<'rcu, C>

§

impl<'rcu, C> Send for CursorOwner<'rcu, C>

§

impl<'rcu, C> Sync for CursorOwner<'rcu, C>

§

impl<'rcu, C> Unpin for CursorOwner<'rcu, C>
where <C as PageTableConfig>::E: Unpin, C: Unpin,

§

impl<'rcu, C> !UnwindSafe for CursorOwner<'rcu, C>

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

§

impl<T, VERUS_SPEC__A> FromSpec<T> for VERUS_SPEC__A
where VERUS_SPEC__A: From<T>,

§

fn obeys_from_spec() -> bool

§

fn from_spec(v: T) -> VERUS_SPEC__A

Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

§

impl<T, VERUS_SPEC__A> IntoSpec<T> for VERUS_SPEC__A
where VERUS_SPEC__A: Into<T>,

§

fn obeys_into_spec() -> bool

§

fn into_spec(self) -> T

§

impl<T, U> IntoSpecImpl<U> for T
where U: From<T>,

§

fn obeys_into_spec() -> bool

§

fn into_spec(self) -> U

Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
§

impl<T, VERUS_SPEC__A> TryFromSpec<T> for VERUS_SPEC__A
where VERUS_SPEC__A: TryFrom<T>,

§

fn obeys_try_from_spec() -> bool

§

fn try_from_spec( v: T, ) -> Result<VERUS_SPEC__A, <VERUS_SPEC__A as TryFrom<T>>::Error>

Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
§

impl<T, VERUS_SPEC__A> TryIntoSpec<T> for VERUS_SPEC__A
where VERUS_SPEC__A: TryInto<T>,

§

fn obeys_try_into_spec() -> bool

§

fn try_into_spec(self) -> Result<T, <VERUS_SPEC__A as TryInto<T>>::Error>

§

impl<T, U> TryIntoSpecImpl<U> for T
where U: TryFrom<T>,

§

fn obeys_try_into_spec() -> bool

§

fn try_into_spec(self) -> Result<U, <U as TryFrom<T>>::Error>