Skip to main content

CursorOwner

Struct CursorOwner 

Source
pub struct CursorOwner<'rcu, C: PageTableConfig> {
    pub level: PagingLevel,
    pub continuations: Map<int, CursorContinuation<'rcu, C>>,
    pub va: AbstractVaddr,
    pub guard_level: PagingLevel,
    pub prefix: AbstractVaddr,
    pub popped_too_high: bool,
}

Fields§

§ level: PagingLevel
§ continuations: Map<int, CursorContinuation<'rcu, C>>
§ va: AbstractVaddr
§ guard_level: PagingLevel
§ prefix: AbstractVaddr
§ popped_too_high: bool

Implementations§

Source§

impl<'rcu, C: PageTableConfig> CursorOwner<'rcu, C>

Source

pub proof fn protect_preserves_cursor_inv_metaregion( self, other: Self, regions: MetaRegionOwners, )

requires
self.inv(),
self.metaregion_sound(regions),
self.cur_entry_owner().is_frame(),
other.cur_entry_owner().is_frame(),
other.cur_entry_owner().inv(),
other.cur_entry_owner().frame.unwrap().mapped_pa
    == self.cur_entry_owner().frame.unwrap().mapped_pa,
other.cur_entry_owner().path == self.cur_entry_owner().path,
other.cur_entry_owner().parent_level == self.cur_entry_owner().parent_level,
self.level == other.level,
self.guard_level == other.guard_level,
self.va == other.va,
self.prefix == other.prefix,
self.popped_too_high == other.popped_too_high,
forall |i: int| {
    self.level <= i < NR_LEVELS
        ==> #[trigger] self.continuations[i] == other.continuations[i]
},
other.continuations[self.level - 1].inv(),
other.continuations[self.level - 1].all_some(),
other.continuations[self.level - 1].idx == self.continuations[self.level - 1].idx,
other.continuations[self.level - 1].entry_own.parent_level
    == self.continuations[self.level - 1].entry_own.parent_level,
other.continuations[self.level - 1].guard_perm.value().inner.inner@.ptr.addr()
    == self.continuations[self.level - 1].guard_perm.value().inner.inner@.ptr.addr(),
other.continuations[self.level - 1].path() == self.continuations[self.level - 1].path(),
other.continuations.dom() =~= self.continuations.dom(),
forall |j: int| {
    0 <= j < NR_ENTRIES && j != self.continuations[self.level - 1].idx as int
        ==> #[trigger] other.continuations[self.level - 1].children[j]
            == self.continuations[self.level - 1].children[j]
},
({
    let new_child = other
        .continuations[self.level - 1]
        .children[other.continuations[self.level - 1].idx as int]
        .unwrap();
    let new_path = other
        .continuations[self.level - 1]
        .path()
        .push_tail(other.continuations[self.level - 1].idx as usize);
    new_child
        .tree_predicate_map(
            new_path,
            PageTableOwner::<C>::metaregion_sound_pred(regions),
        )
}),
other.continuations[self.level - 1].entry_own.metaregion_sound(regions),
ensures
other.inv(),
other.metaregion_sound(regions),
Source

pub proof fn map_branch_none_inv_holds(self, owner0: Self)

requires
owner0.inv(),
self.level == owner0.level,
self.va == owner0.va,
self.guard_level == owner0.guard_level,
self.prefix == owner0.prefix,
self.popped_too_high == owner0.popped_too_high,
forall |i: int| {
    self.level <= i < NR_LEVELS
        ==> #[trigger] self.continuations[i] == owner0.continuations[i]
},
self.continuations[self.level - 1].inv(),
self.continuations[self.level - 1].all_some(),
self.continuations[self.level - 1].idx == owner0.continuations[owner0.level - 1].idx,
self.continuations[self.level - 1].entry_own.parent_level
    == owner0.continuations[owner0.level - 1].entry_own.parent_level,
self.continuations[self.level - 1].guard_perm.value().inner.inner@.ptr.addr()
    == owner0.continuations[owner0.level - 1].guard_perm.value().inner.inner@.ptr.addr(),
self.continuations[self.level - 1].path()
    == owner0.continuations[owner0.level - 1].path(),
self.va.index[self.level - 1] == self.continuations[self.level - 1].idx,
self.continuations.dom() =~= owner0.continuations.dom(),
ensures
self.inv(),
Source

pub proof fn map_branch_none_no_new_mappings(self, owner0: Self)

requires
owner0.inv(),
self.inv(),
self.level == owner0.level,
self.va == owner0.va,
forall |i: int| {
    self.level <= i < NR_LEVELS
        ==> #[trigger] self.continuations[i] == owner0.continuations[i]
},
owner0
    .continuations[owner0.level - 1]
    .children[owner0.continuations[owner0.level - 1].idx as int] is Some,
owner0
    .continuations[owner0.level - 1]
    .children[owner0.continuations[owner0.level - 1].idx as int]
    .unwrap()
    .value
    .is_absent(),
self
    .continuations[self.level - 1]
    .children[self.continuations[self.level - 1].idx as int] is Some,
self
    .continuations[self.level - 1]
    .children[self.continuations[self.level - 1].idx as int]
    .unwrap()
    .value
    .is_node(),
self.continuations[self.level - 1].path()
    == owner0.continuations[owner0.level - 1].path(),
forall |j: int| {
    0 <= j < NR_ENTRIES && j != owner0.continuations[owner0.level - 1].idx as int
        ==> #[trigger] self.continuations[self.level - 1].children[j]
            == owner0.continuations[owner0.level - 1].children[j]
},
PageTableOwner(
        self
            .continuations[self.level - 1]
            .children[self.continuations[self.level - 1].idx as int]
            .unwrap(),
    )
    .view_rec(
        self
            .continuations[self.level - 1]
            .path()
            .push_tail(self.continuations[self.level - 1].idx as usize),
    ) =~= Set::<Mapping>::empty(),
ensures
self.view_mappings() =~= owner0.view_mappings(),

After alloc_if_none (absent->node), view_mappings is unchanged (both contribute zero mappings).

Source

pub proof fn map_branch_none_cur_entry_absent(self)

requires
self.inv(),
forall |i: int| {
    0 <= i < NR_ENTRIES
        ==> #[trigger] self.continuations[self.level - 1].children[i] is Some
            && self.continuations[self.level - 1].children[i].unwrap().value.is_absent()
},
ensures
self.cur_entry_owner().is_absent(),

After map_branch_none (alloc_if_none + push_level), the current entry is absent.

Proof: alloc_if_none creates an empty PT node where all children are absent (allocated_empty_node_owner line 172). push_level enters one of these children, so cur_entry_owner().is_absent() holds.

Source

pub proof fn cursor_path_nesting(self, i: int, j: int)

requires
self.inv(),
self.level - 1 <= j < i,
i < NR_LEVELS,
ensures
self.continuations[j].path().len() as int > self.continuations[i].path().len() as int,
self.continuations[j].path().index(self.continuations[i].path().len() as int)
    == self.continuations[i].idx,
Source

pub proof fn lemma_page_size_spec_5_eq_pow2_48()

ensures
page_size_spec(5) == pow2(48nat) as usize,
Source

pub proof fn jump_not_in_node_level_lt_guard_minus_one( self, level: PagingLevel, va: Vaddr, node_start: Vaddr, )

requires
self.inv(),
self.locked_range().start <= va < self.locked_range().end,
1 <= level,
level + 1 <= self.guard_level,
self.locked_range().start <= node_start,
node_start + page_size((level + 1) as PagingLevel) <= self.locked_range().end,
!(node_start <= va && va < node_start + page_size((level + 1) as PagingLevel)),
ensures
level + 1 < self.guard_level,
Source§

impl<'rcu, C: PageTableConfig> CursorOwner<'rcu, C>

Source

pub open spec fn max_steps_subtree(level: usize) -> nat

{
    if level <= 1 {
        NR_ENTRIES as nat
    } else {
        (NR_ENTRIES as nat) * (Self::max_steps_subtree((level - 1) as usize) + 1)
    }
}

The number of steps it will take to walk through every node of a full page table at level level

Source

pub open spec fn max_steps_partial(self, level: usize) -> nat

{
    if level == NR_LEVELS {
        0
    } else {
        let cont = self.continuations[(level - 1) as int];
        let steps = Self::max_steps_subtree(level) * ((NR_ENTRIES - cont.idx) as nat);
        let remaining_steps = self.max_steps_partial((level + 1) as usize);
        steps + remaining_steps
    }
}

The number of steps it will take to walk through the remaining entries of the page table starting at the given level.

Source

pub open spec fn max_steps(self) -> nat

{ self.max_steps_partial(self.level as usize) }
Source

pub proof fn max_steps_subtree_positive(level: usize)

ensures
Self::max_steps_subtree(level) > 0,
Source

pub proof fn max_steps_partial_eq(self, other: Self, start: usize)

requires
1 <= start <= NR_LEVELS,
forall |k: int| {
    start - 1 <= k < NR_LEVELS
        ==> #[trigger] self.continuations[k].idx == other.continuations[k].idx
},
ensures
self.max_steps_partial(start) == other.max_steps_partial(start),

Two owners with the same idx values from start upward have the same max_steps_partial.

Source

pub proof fn max_steps_partial_inv(self, other: Self, level: usize)

requires
self.inv(),
other.inv(),
self.level == other.level,
self.level <= level <= NR_LEVELS,
forall |i: int| {
    self.level - 1 <= i < NR_LEVELS
        ==> self.continuations[i].idx == other.continuations[i].idx
},
ensures
self.max_steps_partial(level) == other.max_steps_partial(level),
Source

pub open spec fn push_level_owner_spec(self, guard_perm: GuardPerm<'rcu, C>) -> Self

{
    let cont = self.continuations[self.level - 1];
    let (child, cont) = cont
        .make_cont_spec(self.va.index[self.level - 2] as usize, guard_perm);
    let new_continuations = self.continuations.insert(self.level - 1, cont);
    let new_continuations = new_continuations.insert(self.level - 2, child);
    let new_level = (self.level - 1) as u8;
    Self {
        continuations: new_continuations,
        level: new_level,
        popped_too_high: false,
        ..self
    }
}
Source

pub proof fn push_level_owner_decreases_steps(self, guard_perm: GuardPerm<'rcu, C>)

requires
self.inv(),
self.level > 0,
ensures
self.push_level_owner_spec(guard_perm).max_steps() < self.max_steps(),
Source

pub proof fn push_level_owner_preserves_va(self, guard_perm: GuardPerm<'rcu, C>)

requires
self.inv(),
self.level > 1,
ensures
self.push_level_owner_spec(guard_perm).va == self.va,
self.push_level_owner_spec(guard_perm).continuations[self.level - 2].idx
    == self.va.index[self.level - 2],
Source

pub proof fn push_level_owner_preserves_mappings(self, guard_perm: GuardPerm<'rcu, C>)

requires
self.inv(),
self.level > 1,
self.cur_entry_owner().is_node(),
ensures
self.push_level_owner_spec(guard_perm)@.mappings == self@.mappings,
Source

pub proof fn push_level_owner_preserves_inv(self, guard_perm: GuardPerm<'rcu, C>)

requires
self.inv(),
self.level > 1,
!self.popped_too_high,
self.level < self.guard_level,
self.cur_entry_owner().is_node(),
self.cur_entry_owner().node.unwrap().relate_guard_perm(guard_perm),
forall |i: int| {
    self.level - 1 <= i < NR_LEVELS
        ==> self.continuations[i].guard_perm.value().inner.inner@.ptr.addr()
            != guard_perm.value().inner.inner@.ptr.addr()
},
ensures
self.push_level_owner_spec(guard_perm).inv(),
Source

pub proof fn push_level_owner_preserves_invs( self, guard_perm: GuardPerm<'rcu, C>, regions: MetaRegionOwners, guards: Guards<'rcu, C>, )

requires
self.inv(),
self.level > 1,
!self.popped_too_high,
self.level < self.guard_level,
self.only_current_locked(guards),
self.nodes_locked(guards),
self.metaregion_sound(regions),
self.cur_entry_owner().is_node(),
self.cur_entry_owner().node.unwrap().relate_guard_perm(guard_perm),
guards.lock_held(guard_perm.value().inner.inner@.ptr.addr()),
ensures
self.push_level_owner_spec(guard_perm).inv(),
self.push_level_owner_spec(guard_perm).children_not_locked(guards),
self.push_level_owner_spec(guard_perm).nodes_locked(guards),
self.push_level_owner_spec(guard_perm).metaregion_sound(regions),
Source

pub proof fn push_level_owner(tracked &mut self, tracked guard_perm: Tracked<GuardPerm<'rcu, C>>)

requires
old(self).inv(),
old(self).level > 1,
ensures
*final(self) == old(self).push_level_owner_spec(guard_perm@),
Source

pub open spec fn pop_level_owner_spec(self) -> (Self, GuardPerm<'rcu, C>)

{
    let child = self.continuations[self.level - 1];
    let cont = self.continuations[self.level as int];
    let (new_cont, guard_perm) = cont.restore_spec(child);
    let new_continuations = self.continuations.insert(self.level as int, new_cont);
    let new_continuations = new_continuations.remove(self.level - 1);
    let new_level = (self.level + 1) as u8;
    let popped_too_high = if new_level >= self.guard_level { true } else { false };
    (
        Self {
            continuations: new_continuations,
            level: new_level,
            popped_too_high: popped_too_high,
            ..self
        },
        guard_perm,
    )
}
Source

pub proof fn pop_level_owner_preserves_inv(self)

requires
self.inv(),
self.level < NR_LEVELS,
self.in_locked_range(),
ensures
self.pop_level_owner_spec().0.inv(),
Source

pub proof fn pop_level_owner_preserves_invs( self, guards: Guards<'rcu, C>, regions: MetaRegionOwners, )

requires
self.inv(),
self.level < NR_LEVELS,
self.in_locked_range(),
self.children_not_locked(guards),
self.nodes_locked(guards),
self.metaregion_sound(regions),
ensures
self.pop_level_owner_spec().0.inv(),
self.pop_level_owner_spec().0.only_current_locked(guards),
self.pop_level_owner_spec().0.nodes_locked(guards),
self.pop_level_owner_spec().0.metaregion_sound(regions),
Source

pub proof fn set_va_preserves_inv(self, new_va: AbstractVaddr)

requires
self.inv(),
!self.popped_too_high,
self.level < self.guard_level,
new_va.inv(),
new_va.offset == 0,
new_va.leading_bits == self.prefix.leading_bits,
forall |i: int| self.level - 1 <= i < NR_LEVELS ==> new_va.index[i] == self.va.index[i],
forall |i: int| {
    self.guard_level - 1 <= i < NR_LEVELS ==> new_va.index[i] == self.prefix.index[i]
},
ensures
self.set_va_spec(new_va).inv(),

Update va to a new value that shares the same indices at levels >= self.level. This preserves invariants because:

  1. The new va satisfies va.inv()
  2. The indices at levels >= level match the continuation indices
  3. in_locked_range/above_locked_range depend on va, but the preconditions (new_va has the same leading_bits as prefix, and matches prefix’s indices at levels >= guard_level - 1) keep the new va consistent with the locked range
Source

pub proof fn pop_level_owner(tracked &mut self) -> tracked guard_perm : GuardPerm<'rcu, C>

requires
old(self).inv(),
old(self).level < NR_LEVELS,
ensures
*final(self) == old(self).pop_level_owner_spec().0,
guard_perm == old(self).pop_level_owner_spec().1,
Source

pub open spec fn move_forward_owner_spec(self) -> Self

recommends
self.inv(),
self.level < NR_LEVELS,
self.in_locked_range(),
{
    if self.index() + 1 < NR_ENTRIES {
        self.inc_index().zero_below_level()
    } else if self.level < NR_LEVELS {
        self.pop_level_owner_spec().0.move_forward_owner_spec()
    } else {
        Self {
            popped_too_high: false,
            ..self
        }
    }
}
Source

pub proof fn move_forward_increases_va(self)

requires
self.inv(),
self.level <= NR_LEVELS,
self.in_locked_range(),
!self.popped_too_high,
ensures
self.move_forward_owner_spec().va.to_vaddr() > self.va.to_vaddr(),
Source

pub proof fn move_forward_not_popped_too_high(self)

requires
self.inv(),
self.level <= NR_LEVELS,
self.in_locked_range(),
ensures
!self.move_forward_owner_spec().popped_too_high,
Source

pub proof fn move_forward_owner_decreases_steps(self)

requires
self.inv(),
self.level <= NR_LEVELS,
self.in_locked_range(),
!self.popped_too_high,
ensures
self.move_forward_owner_spec().max_steps() < self.max_steps(),
Source

pub proof fn zero_below_level_eq_align_down(self)

requires
self.va.inv(),
self.va.offset == 0,
1 <= self.level <= NR_LEVELS,
ensures
self.zero_below_level().va == self.va.align_down(self.level as int),

Trivial: zero_below_level is defined as Self { va: self.va.align_down(level), ..self }.

Source

pub proof fn move_forward_va_is_align_up(self)

requires
self.inv(),
self.level <= NR_LEVELS,
self.in_locked_range(),
!self.popped_too_high,
ensures
self.move_forward_owner_spec().va == self.va.align_up(self.level as int),
Source

pub proof fn pop_level_owner_preserves_mappings(self)

requires
self.inv(),
self.level < NR_LEVELS,
self.in_locked_range(),
ensures
self.pop_level_owner_spec().0@.mappings == self@.mappings,

After popping a level, the total view_mappings is preserved. The restored parent at index self.level absorbs the child’s mappings, and both are within the view_mappings range [self.level, NR_LEVELS).

Source

pub proof fn move_forward_owner_preserves_mappings(self)

requires
self.inv(),
self.in_locked_range(),
ensures
self.move_forward_owner_spec()@.mappings == self@.mappings,
Source§

impl<'rcu, C: PageTableConfig> CursorOwner<'rcu, C>

Source

pub open spec fn no_node_at_idx(self, idx: usize) -> bool

{
    &&& self
        .map_full_tree(|e: EntryOwner<C>, _p: TreePath<NR_ENTRIES>| {
            e.is_node() && e.meta_slot_paddr() is Some
                ==> frame_to_index(e.meta_slot_paddr().unwrap()) != idx
        })
    &&& forall |i: int| {
        self.level - 1 <= i < NR_LEVELS
            ==> {
                let e = self.continuations[i].entry_own;
                e.is_node() && e.meta_slot_paddr() is Some
                    ==> frame_to_index(e.meta_slot_paddr().unwrap()) != idx
            }
    }

}

Tree-wide predicate: no page-table node in either the cursor’s tree children or its continuation path has metadata slot index idx. Used as the “sanity” precondition of the path-insert preservation lemma (metaregion_preserved_under_path_insert): since no node occupies slot idx, the only kind of entry the inserted path can touch is a frame. Callers satisfy it by observing that page-table node metadata lives in FRAME_METADATA_RANGE, which is disjoint from any data-frame paddr (where paths_in_pt inserts happen).

Source

pub proof fn and_map_full_tree( self, f: FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>, guard: FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>, )

requires
self.inv(),
self.map_full_tree(f),
self.map_full_tree(guard),
ensures
self.map_full_tree(|e: EntryOwner<C>, p: TreePath<NR_ENTRIES>| f(e, p) && guard(e, p)),

Pointwise conjunction of two tree-wide predicates: if self.map_full_tree(f) and self.map_full_tree(guard) hold, then self.map_full_tree(f && guard) holds. A thin wrapper around OwnerSubtree::map_implies_and applied per continuation + per child; extracted so callers don’t have to re-derive the same nested assert forall ... by { map_implies_and(...) } block.

Source

pub proof fn no_node_at_idx_from_slot_key( self, regions: MetaRegionOwners, changed_idx: usize, )

requires
self.inv(),
regions.inv(),
self.metaregion_sound(regions),
regions.slots.contains_key(changed_idx),
ensures
self.no_node_at_idx(changed_idx),

Discharge no_node_at_idx(changed_idx) from the observation that changed_idx is the index of a slot currently sitting in the free pool (regions.slots.contains_key(changed_idx)). The argument uses EntryOwner::active_entry_not_in_free_pool: an active node’s metadata slot is never simultaneously in the free pool, so any node in the cursor tree must have a different slot index than changed_idx.

Callers doing a paths_in_pt.insert at a frame’s data-slot (e.g., map and the huge-page split) use this helper to satisfy the precondition of Self::metaregion_preserved_under_path_insert.

Source

pub proof fn metaregion_preserved_under_path_insert( self, regions0: MetaRegionOwners, regions1: MetaRegionOwners, changed_idx: usize, new_path: TreePath<NR_ENTRIES>, )

requires
self.inv(),
self.metaregion_sound(regions0),
regions0.inv(),
regions0.slot_owners.contains_key(changed_idx),
regions1.slots == regions0.slots,
regions1.slot_owners.dom() =~= regions0.slot_owners.dom(),
forall |i: usize| i != changed_idx ==> regions0.slot_owners[i] == regions1.slot_owners[i],
regions1.slot_owners[changed_idx].inner_perms
    == regions0.slot_owners[changed_idx].inner_perms,
regions1.slot_owners[changed_idx].self_addr
    == regions0.slot_owners[changed_idx].self_addr,
regions1.slot_owners[changed_idx].raw_count
    == regions0.slot_owners[changed_idx].raw_count,
regions1.slot_owners[changed_idx].usage == regions0.slot_owners[changed_idx].usage,
regions1.slot_owners[changed_idx].paths_in_pt
    == regions0.slot_owners[changed_idx].paths_in_pt.insert(new_path),
self.no_node_at_idx(changed_idx),
ensures
self.metaregion_sound(regions1),

Preservation of the cursor-level metaregion invariants when the only change to regions is the insertion of a new path into the paths_in_pt set at a single slot.

Source§

impl<'rcu, C: PageTableConfig> CursorOwner<'rcu, C>

Source

pub proof fn cur_subtree_eq_filtered_mappings_path(self)

requires
self.inv(),
ensures
({
    let subtree_va = vaddr_of::<C>(self.cur_subtree().value.path) as int;
    let size = page_size(self.level) as int;
    PageTableOwner(self.cur_subtree())@.mappings
        == self@
            .mappings
            .filter(|m: Mapping| subtree_va <= m.va_range.start < subtree_va + size)
}),

The current subtree’s mappings equal the filter over [subtree_va, subtree_va + page_size(level)) where subtree_va = vaddr(cur_subtree path).

Source

pub proof fn cur_subtree_eq_filtered_mappings(self)

requires
self.inv(),
ensures
({
    let start = nat_align_down(self@.cur_va as nat, page_size(self.level) as nat)
        as Vaddr;
    let size = page_size(self.level);
    PageTableOwner(self.cur_subtree())@.mappings
        == self@.mappings.filter(|m: Mapping| start <= m.va_range.start < start + size)
}),

Version using nat_align_down(cur_va, page_size(level)) in the filter. Bridge: nat_align_down(cur_va, ps) as int == vaddr(cur_path) + leading_bits * 2^48.

Source

pub proof fn subtree_va_ranges_disjoint(self, j: int)

requires
self.inv(),
0 <= j < NR_ENTRIES,
j != self.index(),
self.continuations[self.level - 1].children[j] is Some,
ensures
vaddr(self.continuations[self.level - 1].path().push_tail(j as usize)) as int
    + self.va.leading_bits * 0x1_0000_0000_0000int
    + page_size(self.level as PagingLevel) as int <= self.cur_va() as int
    || (self.cur_va() as int)
        < vaddr(self.continuations[self.level - 1].path().push_tail(j as usize)) as int
            + self.va.leading_bits * 0x1_0000_0000_0000int,

Subtrees at different indices have disjoint VA ranges.

Source

pub proof fn higher_level_children_disjoint(self, i: int, j: int)

requires
self.inv(),
self.level - 1 < i < NR_LEVELS,
0 <= j < NR_ENTRIES,
j != self.continuations[i].idx,
self.continuations[i].children[j] is Some,
ensures
vaddr(self.continuations[i].path().push_tail(j as usize)) as int
    + self.va.leading_bits * 0x1_0000_0000_0000int
    + page_size((i + 1) as PagingLevel) as int <= self.cur_va() as int
    || (self.cur_va() as int)
        < vaddr(self.continuations[i].path().push_tail(j as usize)) as int
            + self.va.leading_bits * 0x1_0000_0000_0000int,

Children of higher-level continuations have VA ranges that don’t include cur_va, because cur_va’s indices at those levels match the path to the current position.

Source

pub proof fn mapping_covering_cur_va_from_cur_subtree(self, m: Mapping)

requires
self.inv(),
self.view_mappings().contains(m),
m.va_range.start <= self.cur_va() < m.va_range.end,
ensures
PageTableOwner(self.cur_subtree()).view_rec(self.cur_subtree().value.path).contains(m),

Any mapping that covers cur_va must come from the current subtree. This follows from the disjointness of VA ranges and the fact that cur_va falls within the current subtree’s VA range.

Source

pub proof fn view_mappings_replace_lowest( old_self: Self, new_self: Self, old_cont: CursorContinuation<'rcu, C>, new_cont: CursorContinuation<'rcu, C>, )

requires
old_self.inv(),
new_self.inv(),
old_self.level == new_self.level,
old_self.continuations[old_self.level - 1] == old_cont,
new_self.continuations[new_self.level - 1] == new_cont,
forall |i: int| {
    old_self.level <= i < NR_LEVELS
        ==> old_self.continuations[i] == new_self.continuations[i]
},
ensures
new_self.view_mappings()
    == (old_self.view_mappings() - old_cont.view_mappings())
        .union(new_cont.view_mappings()),

Combined replace: swap the lowest continuation, relating old and new mapping sets. Avoids the Map::remove phantom key issue by requiring both old and new to have inv().

Source

pub proof fn as_page_table_owner_preserves_view_mappings(self)

requires
self.inv(),
ensures
self.as_page_table_owner().view_rec(self.continuations[3].path()) == self.view_mappings(),
self.as_page_table_owner().0.inv(),
self.as_page_table_owner().0.level == self.continuations[3].tree_level,
self.as_page_table_owner().pt_inv(),
Source

pub proof fn view_mapping_inv(self)

requires
self.inv(),
ensures
forall |m: Mapping| self.view_mappings().contains(m) ==> #[trigger] m.inv(),

Every mapping in the cursor view satisfies Mapping::inv().

Collapses the cursor view into a single-root view_rec and applies view_rec_mapping_inv. Inherits the latter’s two narrow assumes on vaddr(path) arithmetic.

Source

pub proof fn view_mapping_page_size_valid(self)

requires
self.inv(),
ensures
forall |m: Mapping| {
    #[trigger] self.view_mappings().contains(m)
        ==> set![4096usize, 2097152usize, 1073741824usize].contains(m.page_size)
},

Every mapping in the cursor view has page_size ∈ {4K, 2M, 1G}.

Uses the standard collapse trick: view_mappings equals as_page_table_owner().view_rec(continuations[3].path()), then applies view_rec_mapping_page_size. The root’s parent_level == 5 == INC_LEVELS is given by the cursor invariant (continuations[3].entry_own.parent_level == 5).

Source

pub proof fn view_mappings_finite(self)

requires
self.inv(),
ensures
self.view_mappings().finite(),

Finiteness of the cursor view.

Collapses the union-over-continuations view_mappings into a single view_rec rooted at the reconstructed root page table, then uses view_rec_finite (bounded depth / branching).

Source

pub proof fn as_page_table_owner_view_non_overlapping(self)

requires
self.inv(),
ensures
self@.non_overlapping(),

Non-overlapping mappings in the cursor view.

Collapses the union-over-continuations view_mappings into a single view_rec rooted at the reconstructed root page table, then applies view_rec_disjoint_vaddrs on that single subtree. Follows from tree structure alone.

Source§

impl<'rcu, C: PageTableConfig> CursorOwner<'rcu, C>

Source

pub open spec fn node_unlocked( guards: Guards<'rcu, C>, ) -> FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>

{
    |owner: EntryOwner<C>, path: TreePath<NR_ENTRIES>| {
        owner.is_node() ==> guards.unlocked(owner.node.unwrap().meta_perm.addr())
    }
}
Source

pub open spec fn node_unlocked_except( guards: Guards<'rcu, C>, addr: usize, ) -> FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>

{
    |owner: EntryOwner<C>, path: TreePath<NR_ENTRIES>| {
        owner.is_node()
            ==> (owner.node.unwrap().meta_perm.addr() != addr
                ==> guards.unlocked(owner.node.unwrap().meta_perm.addr()))
    }
}
Source

pub open spec fn map_full_tree( self, f: FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>, ) -> bool

{
    forall |i: int| {
        self.level - 1 <= i < NR_LEVELS ==> { self.continuations[i].map_children(f) }
    }
}
Source

pub open spec fn map_only_children( self, f: FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>, ) -> bool

{
    forall |i: int| {
        self.level - 1 <= i < NR_LEVELS ==> self.continuations[i].map_children(f)
    }
}
Source

pub open spec fn children_not_locked(self, guards: Guards<'rcu, C>) -> bool

{ self.map_only_children(Self::node_unlocked(guards)) }
Source

pub open spec fn only_current_locked(self, guards: Guards<'rcu, C>) -> bool

{
    self.map_only_children(
        Self::node_unlocked_except(
            guards,
            self.cur_entry_owner().node.unwrap().meta_perm.addr(),
        ),
    )
}
Source

pub proof fn never_drop_restores_children_not_locked( self, guard: PageTableGuard<'rcu, C>, guards0: Guards<'rcu, C>, guards1: Guards<'rcu, C>, )

requires
self.inv(),
self.only_current_locked(guards0),
<PageTableGuard<'rcu, C> as TrackDrop>::constructor_requires(guard, guards0),
<PageTableGuard<'rcu, C> as TrackDrop>::constructor_ensures(guard, guards0, guards1),
self.cur_entry_owner().is_node(),
guard.inner.inner@.ptr.addr() == self.cur_entry_owner().node.unwrap().meta_perm.addr(),
ensures
self.children_not_locked(guards1),
Source

pub proof fn never_drop_restores_nodes_locked( self, guard: PageTableGuard<'rcu, C>, guards0: Guards<'rcu, C>, guards1: Guards<'rcu, C>, )

requires
self.inv(),
self.nodes_locked(guards0),
<PageTableGuard<'rcu, C> as TrackDrop>::constructor_requires(guard, guards0),
<PageTableGuard<'rcu, C> as TrackDrop>::constructor_ensures(guard, guards0, guards1),
forall |i: int| {
    self.level - 1 <= i < NR_LEVELS
        ==> self.continuations[i].guard_perm.value().inner.inner@.ptr.addr()
            != guard.inner.inner@.ptr.addr()
},
ensures
self.nodes_locked(guards1),

After dropping the guard for the popped level, nodes_locked is preserved for the new (higher-level) owner, because the dropped guard’s address is not among those checked by nodes_locked (which covers levels >= self.level - 1).

Source

pub open spec fn nodes_locked(self, guards: Guards<'rcu, C>) -> bool

{
    forall |i: int| {
        self.level - 1 <= i < NR_LEVELS ==> { self.continuations[i].node_locked(guards) }
    }
}

After a protect operation that only modifies frame.prop of the current entry, CursorOwner::inv() and metaregion_sound are preserved.

Safety: protect changes only frame.prop and updates parent.children_perm to match. EntryOwner::inv() is preserved (from protect postcondition). metaregion_sound is preserved because it doesn’t use frame.prop. rel_children holds via match_pte (from protect’s wf/node_matching postconditions).

The axiom requires only the semantic properties of the modified entry that are checked by inv and metaregion_sound; the structural identity of other continuations is trusted to hold from the tracked restore operations in the caller.

Source

pub open spec fn index(self) -> usize

{ self.continuations[self.level - 1].idx }
Source

pub open spec fn inc_index(self) -> Self

{
    Self {
        continuations: self
            .continuations
            .insert(self.level - 1, self.continuations[self.level - 1].inc_index()),
        va: AbstractVaddr {
            index: self
                .va
                .index
                .insert(
                    self.level - 1,
                    self.continuations[self.level - 1].inc_index().idx as int,
                ),
            ..self.va
        },
        popped_too_high: false,
        ..self
    }
}
Source

pub proof fn do_inc_index(tracked &mut self)

requires
old(self).inv(),
old(self).level <= old(self).guard_level,
old(self).continuations[old(self).level - 1].idx + 1 < NR_ENTRIES,
old(self).level == NR_LEVELS
    ==> (old(self).continuations[old(self).level - 1].idx + 1)
        <= C::TOP_LEVEL_INDEX_RANGE_spec().end,
ensures
final(self).inv(),
*final(self) == old(self).inc_index(),
Source

pub proof fn inv_continuation(self, i: int)

requires
self.inv(),
self.level - 1 <= i <= NR_LEVELS - 1,
ensures
self.continuations.contains_key(i),
self.continuations[i].inv(),
self.continuations[i].children.len() == NR_ENTRIES,
Source

pub open spec fn view_mappings(self) -> Set<Mapping>

{
    Set::new(|m: Mapping| {
        exists |i: int| {
            self.level - 1 <= i < NR_LEVELS
                && self.continuations[i].view_mappings().contains(m)
        }
    })
}
Source

pub open spec fn as_page_table_owner(self) -> PageTableOwner<C>

{
    if self.level == 1 {
        let l1 = self.continuations[0];
        let l2 = self.continuations[1].restore_spec(l1).0;
        let l3 = self.continuations[2].restore_spec(l2).0;
        let l4 = self.continuations[3].restore_spec(l3).0;
        l4.as_page_table_owner()
    } else if self.level == 2 {
        let l2 = self.continuations[1];
        let l3 = self.continuations[2].restore_spec(l2).0;
        let l4 = self.continuations[3].restore_spec(l3).0;
        l4.as_page_table_owner()
    } else if self.level == 3 {
        let l3 = self.continuations[2];
        let l4 = self.continuations[3].restore_spec(l3).0;
        l4.as_page_table_owner()
    } else {
        let l4 = self.continuations[3];
        l4.as_page_table_owner()
    }
}
Source

pub open spec fn cur_entry_owner(self) -> EntryOwner<C>

{ self.cur_subtree().value }
Source

pub open spec fn cur_subtree(self) -> OwnerSubtree<C>

{ self.continuations[self.level - 1].children[self.index() as int].unwrap() }
Source

pub proof fn cur_frame_clone_requires( self, item: C::Item, pa: Paddr, level: PagingLevel, prop: PageProperty, regions: MetaRegionOwners, )

requires
self.inv(),
regions.inv(),
self.metaregion_sound(regions),
self.cur_entry_owner().is_frame(),
pa == self.cur_entry_owner().frame.unwrap().mapped_pa,
C::item_from_raw_spec(pa, level, prop) == item,
crate::mm::frame::meta::has_safe_slot(pa),
regions.slot_owners[frame_to_index(pa)].inner_perms.ref_count.value() + 1
    < crate::specs::mm::frame::meta_owners::REF_COUNT_MAX,
ensures
item.clone_requires(regions),

Axiom: the item reconstructed from the current frame’s physical address satisfies clone_requires.

Safety: When metaregion_sound holds for a frame entry, the item reconstructed via item_from_raw_spec(pa, ...) is the original frame item. The frame’s slot permission (owned by the cursor) has the correct address, is initialised, and its ref count is in the valid clonable range (> 0, < REF_COUNT_MAX), so clone_requires is satisfied.

This is a trait-level axiom: C::Item::clone_requires is fully generic in the PageTableConfig trait, so the postcondition cannot be discharged without knowing the concrete item type. It holds for every PageTableConfig used in ostd because item_from_raw_spec always returns a freshly-constructed Frame<M> handle whose Frame::<M>::clone_requires unfolds to slot-address equality, initialisation, and a bounded ref-count — all delivered by metaregion_sound for frame entries.

Source

pub proof fn clone_item_preserves_invariants( self, old_regions: MetaRegionOwners, new_regions: MetaRegionOwners, idx: usize, )

requires
self.inv(),
self.metaregion_sound(old_regions),
old_regions.inv(),
self.cur_entry_owner().is_frame(),
idx == frame_to_index(self.cur_entry_owner().frame.unwrap().mapped_pa),
old_regions.slot_owners.contains_key(idx),
new_regions.slot_owners.contains_key(idx),
new_regions.slot_owners[idx].inner_perms.ref_count.value()
    == old_regions.slot_owners[idx].inner_perms.ref_count.value() + 1,
new_regions.slot_owners[idx].inner_perms.ref_count.id()
    == old_regions.slot_owners[idx].inner_perms.ref_count.id(),
new_regions.slot_owners[idx].inner_perms.storage
    == old_regions.slot_owners[idx].inner_perms.storage,
new_regions.slot_owners[idx].inner_perms.vtable_ptr
    == old_regions.slot_owners[idx].inner_perms.vtable_ptr,
new_regions.slot_owners[idx].inner_perms.in_list
    == old_regions.slot_owners[idx].inner_perms.in_list,
new_regions.slot_owners[idx].paths_in_pt == old_regions.slot_owners[idx].paths_in_pt,
new_regions.slot_owners[idx].self_addr == old_regions.slot_owners[idx].self_addr,
new_regions.slot_owners[idx].raw_count == old_regions.slot_owners[idx].raw_count,
new_regions.slot_owners[idx].usage == old_regions.slot_owners[idx].usage,
new_regions.slot_owners.dom() == old_regions.slot_owners.dom(),
forall |i: usize| {
    i != idx && old_regions.slot_owners.contains_key(i)
        ==> new_regions.slot_owners[i] == old_regions.slot_owners[i]
},
new_regions.slots == old_regions.slots,
0 < old_regions.slot_owners[idx].inner_perms.ref_count.value(),
old_regions.slot_owners[idx].inner_perms.ref_count.value() + 1 < REF_COUNT_MAX,
ensures
new_regions.inv(),
self.metaregion_sound(new_regions),

Incrementing the ref count of the current frame preserves regions.inv() and self.metaregion_sound(new_regions).

Source

pub proof fn new_child_mappings_eq_target( self, new_subtree: OwnerSubtree<C>, pa: Paddr, level: PagingLevel, prop: PageProperty, )

requires
self.inv(),
level == self.level,
new_subtree.inv(),
new_subtree.value.is_frame(),
new_subtree.value.path
    == self
        .continuations[self.level as int - 1]
        .path()
        .push_tail(self.continuations[self.level as int - 1].idx as usize),
new_subtree.value.frame.unwrap().mapped_pa == pa,
new_subtree.value.frame.unwrap().prop == prop,
ensures
PageTableOwner(new_subtree)@.mappings
    == set![
        Mapping { va_range : self@.cur_slot_range(page_size(level)), pa_range : pa..(pa
        + page_size(level)) as usize, page_size : page_size(level), property : prop, }
    ],

A new frame subtree at the current position has mappings equal to the singleton mapping covering the current slot range.

Source

pub open spec fn locked_range(self) -> Range<Vaddr>

{
    let start = self.prefix.align_down(self.guard_level as int).to_vaddr();
    let end = self.prefix.align_up(self.guard_level as int).to_vaddr();
    Range { start, end }
}
Source

pub open spec fn in_locked_range(self) -> bool

{ self.locked_range().start <= self.va.to_vaddr() < self.locked_range().end }
Source

pub open spec fn above_locked_range(self) -> bool

{ self.va.to_vaddr() >= self.locked_range().end }
Source

pub proof fn inc_at_guard_level_above_locked_range( old_va: AbstractVaddr, prefix: AbstractVaddr, guard_level: u8, level: u8, new_va_val: Vaddr, )

requires
old_va.inv(),
prefix.inv(),
1 <= guard_level <= NR_LEVELS,
level == guard_level,
new_va_val == old_va.to_vaddr() + page_size(level as PagingLevel),
prefix.align_down(guard_level as int).to_vaddr() <= old_va.to_vaddr(),
old_va.to_vaddr() < prefix.align_up(guard_level as int).to_vaddr(),
ensures
new_va_val >= prefix.align_up(guard_level as int).to_vaddr(),

After incrementing at guard_level, the new VA >= locked_range.end.

Source

pub proof fn prefix_in_locked_range(self)

requires
self.inv(),
!self.popped_too_high,
self.level < self.guard_level,
ensures
self.in_locked_range(),
Source

pub proof fn in_locked_range_prefix_match(self)

requires
self.inv(),
self.prefix.inv(),
1 <= self.guard_level <= NR_LEVELS,
self.in_locked_range(),
ensures
forall |i: int| {
    self.guard_level <= i < NR_LEVELS ==> self.va.index[i] == self.prefix.index[i]
},

Reverse of prefix_in_locked_range: if va is in the locked range, then va shares upper indices with prefix.

Source

pub proof fn in_locked_range_level_lt_nr_levels(self)

requires
self.inv(),
self.in_locked_range(),
!self.popped_too_high,
ensures
self.level < NR_LEVELS,
Source

pub proof fn in_locked_range_top_index_lt_top_end(self)

requires
self.inv(),
self.in_locked_range(),
!self.popped_too_high,
ensures
self.va.index[NR_LEVELS - 1] < C::TOP_LEVEL_INDEX_RANGE_spec().end,

When the cursor is in the locked range and not popped, its top-level index is strictly less than TOP_LEVEL_INDEX_RANGE.end (the relaxed inv only allows <=, but the operational state is strict).

Source

pub proof fn in_locked_range_level_lt_guard_level(self)

requires
self.inv(),
self.in_locked_range(),
!self.popped_too_high,
ensures
self.level < self.guard_level,
Source

pub proof fn node_within_locked_range(self, level: PagingLevel)

requires
self.inv(),
self.in_locked_range(),
1 <= level < self.guard_level,
ensures
self.locked_range().start
    <= nat_align_down(
        self.va.to_vaddr() as nat,
        page_size((level + 1) as PagingLevel) as nat,
    ) as usize,
nat_align_down(self.va.to_vaddr() as nat, page_size((level + 1) as PagingLevel) as nat)
    as usize + page_size((level + 1) as PagingLevel) <= self.locked_range().end,

The node at level+1 containing va fits within the locked range.

Source

pub proof fn locked_range_page_aligned(self)

requires
self.inv(),
ensures
self.locked_range().end % PAGE_SIZE == 0,
self.locked_range().start % PAGE_SIZE == 0,
Source

pub proof fn cur_subtree_inv(self)

requires
self.inv(),
ensures
self.cur_subtree().inv(),
Source

pub proof fn cur_entry_absent_not_present(self)

requires
self.inv(),
self.cur_entry_owner().is_absent(),
ensures
!self@.present(),

If the current entry is absent, !self@.present().

Source

pub proof fn cur_subtree_empty_not_present(self)

requires
self.inv(),
PageTableOwner(self.cur_subtree()).view_rec(self.cur_subtree().value.path) =~= set![],
ensures
!self@.present(),

Generalises cur_entry_absent_not_present to any empty subtree.

Source

pub proof fn cur_entry_frame_present(self)

requires
self.inv(),
self.cur_entry_owner().is_frame(),
ensures
self@.present(),
self@
    .query(
        self.cur_entry_owner().frame.unwrap().mapped_pa,
        self.cur_entry_owner().frame.unwrap().size,
        self.cur_entry_owner().frame.unwrap().prop,
    ),
Source

pub open spec fn path_metaregion_sound(self, regions: MetaRegionOwners) -> bool

{
    forall |i: int| {
        self.level - 1 <= i < NR_LEVELS
            ==> self.continuations[i].entry_own.metaregion_sound(regions)
    }
}

The entry_own at each continuation level satisfies metaregion_sound.

Source

pub open spec fn metaregion_sound(self, regions: MetaRegionOwners) -> bool

{
    &&& self
        .map_full_tree(|entry_owner: EntryOwner<C>, path: TreePath<NR_ENTRIES>| {
            entry_owner.metaregion_sound(regions)
        })
    &&& self.path_metaregion_sound(regions)

}
Source

pub proof fn metaregion_preserved( self, other: Self, regions0: MetaRegionOwners, regions1: MetaRegionOwners, )

requires
self.inv(),
self.metaregion_sound(regions0),
self.level == other.level,
self.continuations =~= other.continuations,
OwnerSubtree::implies(
    PageTableOwner::<C>::metaregion_sound_pred(regions0),
    PageTableOwner::<C>::metaregion_sound_pred(regions1),
),
ensures
other.metaregion_sound(regions1),
Source

pub proof fn metaregion_slot_owners_preserved( self, regions0: MetaRegionOwners, regions1: MetaRegionOwners, )

requires
self.inv(),
self.metaregion_sound(regions0),
regions0.slot_owners =~= regions1.slot_owners,
forall |k: usize| {
    regions0.slots.contains_key(k) ==> #[trigger] regions1.slots.contains_key(k)
},
forall |k: usize| {
    regions0.slots.contains_key(k) ==> regions0.slots[k] == #[trigger] regions1.slots[k]
},
ensures
self.metaregion_sound(regions1),

Transfers metaregion_sound when slot_owners is preserved.

Source

pub proof fn metaregion_slot_owners_rc_increment( self, regions0: MetaRegionOwners, regions1: MetaRegionOwners, idx: usize, )

requires
self.inv(),
self.metaregion_sound(regions0),
regions0.inv(),
regions1.slots == regions0.slots,
regions1.slot_owners.dom() == regions0.slot_owners.dom(),
regions1.slot_owners[idx].inner_perms.ref_count.value()
    == regions0.slot_owners[idx].inner_perms.ref_count.value() + 1,
regions1.slot_owners[idx].inner_perms.ref_count.id()
    == regions0.slot_owners[idx].inner_perms.ref_count.id(),
regions1.slot_owners[idx].inner_perms.storage
    == regions0.slot_owners[idx].inner_perms.storage,
regions1.slot_owners[idx].inner_perms.vtable_ptr
    == regions0.slot_owners[idx].inner_perms.vtable_ptr,
regions1.slot_owners[idx].inner_perms.in_list
    == regions0.slot_owners[idx].inner_perms.in_list,
regions1.slot_owners[idx].paths_in_pt == regions0.slot_owners[idx].paths_in_pt,
regions1.slot_owners[idx].self_addr == regions0.slot_owners[idx].self_addr,
regions1.slot_owners[idx].raw_count == regions0.slot_owners[idx].raw_count,
regions1.slot_owners[idx].usage == regions0.slot_owners[idx].usage,
regions1.slot_owners[idx].inner_perms.ref_count.value() != REF_COUNT_UNUSED,
forall |i: usize| {
    i != idx && regions0.slot_owners.contains_key(i)
        ==> regions1.slot_owners[i] == regions0.slot_owners[i]
},
ensures
self.metaregion_sound(regions1),
Source

pub proof fn metaregion_borrow_slot( self, regions0: MetaRegionOwners, regions1: MetaRegionOwners, changed_idx: usize, )

requires
self.inv(),
self.metaregion_sound(regions0),
regions1.inv(),
forall |k: usize| {
    regions0.slots.contains_key(k) ==> #[trigger] regions1.slots.contains_key(k)
},
forall |k: usize| {
    regions0.slots.contains_key(k) && k != changed_idx
        ==> regions0.slots[k] == #[trigger] regions1.slots[k]
},
regions0.slot_owners[changed_idx].raw_count == 0,
regions1.slot_owners[changed_idx].raw_count == 1,
regions1.slot_owners[changed_idx].inner_perms
    == regions0.slot_owners[changed_idx].inner_perms,
regions1.slot_owners[changed_idx].self_addr
    == regions0.slot_owners[changed_idx].self_addr,
regions1.slot_owners[changed_idx].usage == regions0.slot_owners[changed_idx].usage,
regions1.slot_owners[changed_idx].paths_in_pt
    == regions0.slot_owners[changed_idx].paths_in_pt,
forall |i: usize| i != changed_idx ==> regions0.slot_owners[i] == regions1.slot_owners[i],
regions0.slot_owners.dom() =~= regions1.slot_owners.dom(),
ensures
self.metaregion_sound(regions1),

Transfers metaregion_sound when raw_count changed from 0 to 1 at one index. Uses map_implies_and with not_in_scope_pred since tree entries have !in_scope.

Source

pub proof fn cont_entries_metaregion(self, regions: MetaRegionOwners)

requires
self.inv(),
self.metaregion_sound(regions),
ensures
forall |i: int| {
    self.level - 1 <= i < NR_LEVELS
        ==> self.continuations[i].entry_own.metaregion_sound(regions)
},

Continuation entry_owns satisfy metaregion_sound.

§Justification

When the cursor descends into a subtree, each continuation’s entry_own was previously checked by tree_predicate_map in the parent’s child subtree. After descent, map_full_tree only covers the siblings (the taken child is None), so the path entries’ properties are no longer covered by map_full_tree. However, regions is unchanged since descent, so the properties still hold.

Source

pub open spec fn new_spec( owner_subtree: OwnerSubtree<C>, idx: usize, guard_perm: GuardPerm<'rcu, C>, ) -> Self

{
    let va = AbstractVaddr {
        offset: 0,
        index: Map::new(|i: int| 0 <= i < NR_LEVELS, |i: int| 0)
            .insert(NR_LEVELS - 1, idx as int),
        leading_bits: C::LEADING_BITS_spec() as int,
    };
    Self {
        level: NR_LEVELS as PagingLevel,
        continuations: Map::empty()
            .insert(
                NR_LEVELS - 1 as int,
                CursorContinuation::new_spec(owner_subtree, idx, guard_perm),
            ),
        va,
        guard_level: NR_LEVELS as PagingLevel,
        prefix: va,
        popped_too_high: false,
    }
}
Source

pub proof fn new(tracked owner_subtree: OwnerSubtree<C>, idx: usize, tracked guard_perm: GuardPerm<'rcu, C>, ) -> tracked res : Self

ensures
res == Self::new_spec(owner_subtree, idx, guard_perm),
Source§

impl<'rcu, C: PageTableConfig> CursorOwner<'rcu, C>

Source

pub proof fn view_non_overlapping(self)

requires
self.inv(),
ensures
self@.non_overlapping(),

The cursor’s view has non-overlapping mappings. This follows from the tree structure alone: as_page_table_owner_preserves_view_mappings collapses the union-over-continuations view into a single root-rooted view_rec, after which view_rec_disjoint_vaddrs gives pairwise disjointness directly.

Source§

impl<'rcu, C: PageTableConfig> CursorOwner<'rcu, C>

Source

pub proof fn split_while_huge_node_noop(self)

requires
self.inv(),
self.cur_entry_owner().is_node(),
self.level > 1,
ensures
self@.split_while_huge(page_size((self.level - 1) as PagingLevel)) == self@,
Source

pub proof fn split_while_huge_absent_noop(self, size: usize)

requires
self.inv(),
self.cur_entry_owner().is_absent(),
ensures
self@.split_while_huge(size) == self@,

When the current entry is absent, there is no mapping at cur_va in the abstract view, so split_while_huge finds nothing to split and is a no-op for any target size.

Source

pub proof fn split_while_huge_at_level_noop(self)

requires
self.inv(),
ensures
self@.split_while_huge(page_size(self.level as PagingLevel)) == self@,
Source

pub proof fn map_branch_frame_split_while_huge( self, owner0: Self, owner_before_frame: Self, level_before_frame: int, )

requires
self.inv(),
owner0.inv(),
owner_before_frame.inv(),
1 <= level_before_frame - 1,
level_before_frame <= NR_LEVELS,
self.level == (level_before_frame - 1) as u8,
owner_before_frame@
    == owner0@.split_while_huge(page_size(level_before_frame as PagingLevel)),
self@
    == owner_before_frame@
        .split_if_mapped_huge_spec(page_size((level_before_frame - 1) as PagingLevel)),
ensures
self@ == owner0@.split_while_huge(page_size(self.level as PagingLevel)),

After map_branch_none splits a huge frame at level level_before_frame and descends, the cursor view equals owner0@.split_while_huge(page_size(level_before_frame - 1)).

Chain: owner@ = owner_before_frame@.split_if_mapped_huge_spec(page_size(level_before_frame - 1)) = owner0@.split_while_huge(page_size(level_before_frame)).split_if_mapped_huge_spec(…) = owner0@.split_while_huge(page_size(level_before_frame - 1)) The last equality uses the fact that split_while_huge(L) on a frame of size page_size(L) takes exactly one split step to page_size(L-1), matching split_if_mapped_huge_spec.

Source

pub proof fn find_next_split_push_equals_split_while_huge( self, old_view: CursorView<C>, )

requires
self.inv(),
old_view.inv(),
self.cur_entry_owner().is_frame(),
self@.cur_va == old_view.cur_va,
old_view.present(),
old_view.query_mapping().page_size > page_size(self.level as PagingLevel),
old_view.query_mapping().page_size / NR_ENTRIES == page_size(self.level as PagingLevel),
old_view.query_mapping().page_size % page_size(self.level as PagingLevel) == 0,
self@.mappings
    =~= old_view.split_if_mapped_huge_spec(page_size(self.level as PagingLevel)).mappings,
ensures
self@.mappings
    =~= old_view.split_while_huge(page_size(self.level as PagingLevel)).mappings,

After split_if_mapped_huge + push_level, the mappings equal old_view.split_while_huge(page_size(current_level)).

Source

pub proof fn split_while_huge_cur_va_independent( v1: CursorView<C>, v2: CursorView<C>, size: usize, )

requires
v1.inv(),
v2.inv(),
v1.mappings =~= v2.mappings,
v1.cur_va <= v2.cur_va,
v1
    .mappings
    .filter(|m: Mapping| v1.cur_va <= m.va_range.start && m.va_range.start < v2.cur_va)
    =~= Set::<Mapping>::empty(),
!v1.present() && v2.present() ==> v2.query_mapping().page_size <= size,
ensures
v1.split_while_huge(size).mappings =~= v2.split_while_huge(size).mappings,

split_while_huge gives the same mappings for two cur_va values when no mapping starts between them and the !present case is a no-op.

Source§

impl<'rcu, C: PageTableConfig> CursorOwner<'rcu, C>

Source

pub proof fn map_children_implies( self, f: FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>, g: FnSpec<(EntryOwner<C>, TreePath<NR_ENTRIES>), bool>, )

requires
self.inv(),
OwnerSubtree::implies(f, g),
forall |i: int| self.level - 1 <= i < NR_LEVELS ==> self.continuations[i].map_children(f),
ensures
forall |i: int| self.level - 1 <= i < NR_LEVELS ==> self.continuations[i].map_children(g),
Source

pub proof fn cur_entry_node_implies_level_gt_1(self)

requires
self.inv(),
self.cur_entry_owner().is_node(),
ensures
self.level > 1,
Source

pub proof fn frame_not_fits_implies_level_gt_1( self, cur_entry_fits_range: bool, cur_va: Vaddr, end: Vaddr, )

requires
self.inv(),
self.cur_entry_owner().is_frame(),
!cur_entry_fits_range,
cur_va < end,
cur_va == self.cur_va(),
cur_entry_fits_range
    == (cur_va == self.cur_va_range().start.to_vaddr()
        && self.cur_va_range().end.to_vaddr() <= end),
cur_va as nat % PAGE_SIZE as nat == 0,
end as nat % PAGE_SIZE as nat == 0,
ensures
self.level > 1,

A frame entry at the cursor’s current level that doesn’t fit the aligned range [cur_va, end) must be at level > 1. Justification: At level 1, page_size(1) == BASE_PAGE_SIZE. Since the cursor VA and end are BASE_PAGE_SIZE-aligned and cur_va < end, we have cur_va + page_size(1) <= end, so a level-1 frame always fits. Therefore !cur_entry_fits_range implies level > 1.

Source

pub open spec fn not_in_tree(self, owner: EntryOwner<C>) -> bool

{
    self.map_full_tree(|owner0: EntryOwner<C>, path: TreePath<NR_ENTRIES>| {
        owner0.meta_slot_paddr_neq(owner)
    })
}
Source

pub proof fn absent_not_in_tree(self, owner: EntryOwner<C>)

requires
self.inv(),
owner.inv(),
owner.is_absent(),
ensures
self.not_in_tree(owner),
Source§

impl<'rcu, C: PageTableConfig> CursorOwner<'rcu, C>

Source

pub open spec fn zero_below_level_rec(self, level: PagingLevel) -> Self

{
    if self.level <= level {
        self
    } else {
        Self {
            va: AbstractVaddr {
                index: self.va.index.insert(level - 1, 0),
                ..self.va
            },
            ..self.zero_below_level_rec((level + 1) as u8)
        }
    }
}
Source

pub open spec fn zero_below_level(self) -> Self

recommends
1 <= self.level <= NR_LEVELS,
{
    Self {
        va: self.va.align_down(self.level as int),
        ..self
    }
}
Source

pub open spec fn cur_va(self) -> Vaddr

{ self.va.to_vaddr() }
Source

pub open spec fn cur_va_range(self) -> Range<AbstractVaddr>

{
    let start = self.va.align_down(self.level as int);
    let end = self.va.align_up(self.level as int);
    Range { start, end }
}
Source

pub open spec fn set_va_spec(self, new_va: AbstractVaddr) -> Self

{ Self { va: new_va, ..self } }
Source

pub open spec fn set_va_in_node_spec(self, new_va: AbstractVaddr) -> Self

{
    let old_cont = self.continuations[self.level - 1];
    Self {
        va: new_va,
        continuations: self
            .continuations
            .insert(
                self.level - 1,
                CursorContinuation {
                    idx: new_va.index[self.level - 1] as usize,
                    ..old_cont
                },
            ),
        ..self
    }
}
Source

pub proof fn zero_below_level_rec_preserves_above(self, level: PagingLevel)

ensures
forall |lv: int| {
    lv >= self.level
        ==> self.zero_below_level_rec(level).va.index[lv] == #[trigger] self.va.index[lv]
},
Source

pub proof fn zero_below_level_va(self)

requires
1 <= self.level <= NR_LEVELS,
ensures
self.zero_below_level().va == self.va.align_down(self.level as int),

Unfolds zero_below_level to expose the VA as align_down(level).

Source

pub proof fn zero_preserves_above(self)

requires
self.va.inv(),
1 <= self.level <= NR_LEVELS,
ensures
forall |lv: int| {
    self.level <= lv < NR_LEVELS
        ==> self.zero_below_level().va.index[lv] == #[trigger] self.va.index[lv]
},
Source

pub proof fn do_zero_below_level(tracked &mut self)

requires
old(self).inv(),
ensures
*final(self) == old(self).zero_below_level(),
final(self).inv(),
Source

pub proof fn zero_rec_preserves_all_but_va(self, level: PagingLevel)

ensures
self.zero_below_level_rec(level).level == self.level,
self.zero_below_level_rec(level).continuations == self.continuations,
self.zero_below_level_rec(level).guard_level == self.guard_level,
self.zero_below_level_rec(level).prefix == self.prefix,
self.zero_below_level_rec(level).popped_too_high == self.popped_too_high,
Source

pub proof fn zero_preserves_all_but_va(self)

ensures
self.zero_below_level().level == self.level,
self.zero_below_level().continuations == self.continuations,
self.zero_below_level().guard_level == self.guard_level,
self.zero_below_level().prefix == self.prefix,
self.zero_below_level().popped_too_high == self.popped_too_high,
Source

pub proof fn inc_and_zero_increases_va(self)

requires
self.inv(),
self.index() + 1 < NR_ENTRIES,
ensures
self.inc_index().zero_below_level().va.to_vaddr() > self.va.to_vaddr(),
Source

pub proof fn cur_va_range_reflects_view(self)

requires
self.inv(),
self.cur_entry_owner().is_frame(),
ensures
self.cur_va_range().start.reflect(self@.query_range().start as Vaddr),
self.cur_va_range().end.reflect(self@.query_range().end as Vaddr),
Source

pub proof fn cur_va_in_subtree_range(self)

requires
self.inv(),
ensures
vaddr(self.cur_subtree().value.path) as int
    + self.va.leading_bits * 0x1_0000_0000_0000int <= self.cur_va() as int,
(self.cur_va() as int)
    < vaddr(self.cur_subtree().value.path) as int
        + self.va.leading_bits * 0x1_0000_0000_0000int
        + page_size(self.level as PagingLevel) as int,

The current virtual address falls within the VA range of the current subtree’s path, in canonical form (positional vaddr plus the leading_bits * 2^48 shift).

Source

pub proof fn set_va(tracked &mut self, new_va: AbstractVaddr)

requires
forall |i: int| {
    old(self).level - 1 <= i < NR_LEVELS ==> new_va.index[i] == old(self).va.index[i]
},
forall |i: int| {
    old(self).guard_level - 1 <= i < NR_LEVELS
        ==> new_va.index[i] == old(self).prefix.index[i]
},
ensures
*final(self) == old(self).set_va_spec(new_va),
Source

pub proof fn set_va_in_node(tracked &mut self, new_va: AbstractVaddr)

requires
old(self).inv(),
new_va.inv(),
forall |i: int| {
    old(self).level <= i < NR_LEVELS ==> new_va.index[i] == old(self).va.index[i]
},
old(self).locked_range().start <= new_va.to_vaddr() < old(self).locked_range().end,
ensures
*final(self) == old(self).set_va_in_node_spec(new_va),
final(self).inv(),

When jumping within the same page-table node, only indices at levels >= level are guaranteed to match. The entry-within-node index (level - 1) may change, so we update continuations[level-1].idx along with va.

Trait Implementations§

Source§

impl<'rcu, C: PageTableConfig> Inv for CursorOwner<'rcu, C>

Source§

open spec fn inv(self) -> bool

{
    &&& self.va.inv()
    &&& self.va.offset == 0
    &&& 1 <= self.level <= NR_LEVELS
    &&& 1 <= self.guard_level <= NR_LEVELS
    &&& C::TOP_LEVEL_INDEX_RANGE_spec().start <= self.va.index[NR_LEVELS - 1]
    &&& self.va.index[NR_LEVELS - 1] <= C::TOP_LEVEL_INDEX_RANGE_spec().end
    &&& self.in_locked_range() || self.above_locked_range()
    &&& self.popped_too_high ==> self.level >= self.guard_level && self.in_locked_range()
    &&& !self.popped_too_high
        ==> self.level < self.guard_level || self.above_locked_range()
    &&& self.continuations[self.level - 1].all_some()
    &&& forall |i: int| {
        self.level <= i < NR_LEVELS
            ==> { (#[trigger] self.continuations[i]).all_but_index_some() }
    }
    &&& self.prefix.inv()
    &&& forall |i: int| i < self.guard_level ==> self.prefix.index[i] == 0
    &&& self.prefix.index[NR_LEVELS - 1] < C::TOP_LEVEL_INDEX_RANGE_spec().end
    &&& self.va.leading_bits == self.prefix.leading_bits
    &&& self.prefix.leading_bits == C::LEADING_BITS_spec() as int
    &&& !self.popped_too_high
        ==> forall |i: int| {
            self.guard_level <= i < NR_LEVELS
                ==> self.va.index[i] == self.prefix.index[i]
        }
    &&& !self.popped_too_high && self.guard_level >= 1 && self.level < self.guard_level
        ==> self.va.index[self.guard_level - 1] == 0
    &&& self.level <= 4
        ==> {
            &&& self.continuations.contains_key(3)
            &&& self.continuations[3].inv()
            &&& self.continuations[3].level() == 4
            &&& self.continuations[3].entry_own.parent_level == 5
            &&& self.va.index[3] == self.continuations[3].idx

        }
    &&& self.level <= 3
        ==> {
            &&& self.continuations.contains_key(2)
            &&& self.continuations[2].inv()
            &&& self.continuations[2].level() == 3
            &&& self.continuations[2].entry_own.parent_level == 4
            &&& self.va.index[2] == self.continuations[2].idx
            &&& self.continuations[2].guard_perm.value().inner.inner@.ptr.addr()
                != self.continuations[3].guard_perm.value().inner.inner@.ptr.addr()
            &&& self.continuations[2].path()
                == self
                    .continuations[3]
                    .path()
                    .push_tail(self.continuations[3].idx as usize)
            &&& self.continuations[2].entry_own.path.len()
                == self.continuations[3].entry_own.node.unwrap().tree_level + 1
            &&& self
                .continuations[2]
                .entry_own
                .match_pte(
                    self
                        .continuations[3]
                        .entry_own
                        .node
                        .unwrap()
                        .children_perm
                        .value()[self.continuations[3].idx as int],
                    self.continuations[3].entry_own.node.unwrap().level,
                )
            &&& self.continuations[2].entry_own.parent_level
                == self.continuations[3].entry_own.node.unwrap().level

        }
    &&& self.level <= 2
        ==> {
            &&& self.continuations.contains_key(1)
            &&& self.continuations[1].inv()
            &&& self.continuations[1].level() == 2
            &&& self.continuations[1].entry_own.parent_level == 3
            &&& self.va.index[1] == self.continuations[1].idx
            &&& self.continuations[1].guard_perm.value().inner.inner@.ptr.addr()
                != self.continuations[2].guard_perm.value().inner.inner@.ptr.addr()
            &&& self.continuations[1].guard_perm.value().inner.inner@.ptr.addr()
                != self.continuations[3].guard_perm.value().inner.inner@.ptr.addr()
            &&& self.continuations[1].path()
                == self
                    .continuations[2]
                    .path()
                    .push_tail(self.continuations[2].idx as usize)
            &&& self.continuations[1].entry_own.path.len()
                == self.continuations[2].entry_own.node.unwrap().tree_level + 1
            &&& self
                .continuations[1]
                .entry_own
                .match_pte(
                    self
                        .continuations[2]
                        .entry_own
                        .node
                        .unwrap()
                        .children_perm
                        .value()[self.continuations[2].idx as int],
                    self.continuations[2].entry_own.node.unwrap().level,
                )
            &&& self.continuations[1].entry_own.parent_level
                == self.continuations[2].entry_own.node.unwrap().level

        }
    &&& self.level == 1
        ==> {
            &&& self.continuations.contains_key(0)
            &&& self.continuations[0].inv()
            &&& self.continuations[0].level() == 1
            &&& self.continuations[0].entry_own.parent_level == 2
            &&& self.va.index[0] == self.continuations[0].idx
            &&& self.continuations[0].guard_perm.value().inner.inner@.ptr.addr()
                != self.continuations[1].guard_perm.value().inner.inner@.ptr.addr()
            &&& self.continuations[0].guard_perm.value().inner.inner@.ptr.addr()
                != self.continuations[2].guard_perm.value().inner.inner@.ptr.addr()
            &&& self.continuations[0].guard_perm.value().inner.inner@.ptr.addr()
                != self.continuations[3].guard_perm.value().inner.inner@.ptr.addr()
            &&& self.continuations[0].path()
                == self
                    .continuations[1]
                    .path()
                    .push_tail(self.continuations[1].idx as usize)
            &&& self.continuations[0].entry_own.path.len()
                == self.continuations[1].entry_own.node.unwrap().tree_level + 1
            &&& self
                .continuations[0]
                .entry_own
                .match_pte(
                    self
                        .continuations[1]
                        .entry_own
                        .node
                        .unwrap()
                        .children_perm
                        .value()[self.continuations[1].idx as int],
                    self.continuations[1].entry_own.node.unwrap().level,
                )
            &&& self.continuations[0].entry_own.parent_level
                == self.continuations[1].entry_own.node.unwrap().level

        }

}
Source§

impl<'rcu, C: PageTableConfig> InvView for CursorOwner<'rcu, C>

Source§

impl<'rcu, C: PageTableConfig> View for CursorOwner<'rcu, C>

Source§

open spec fn view(&self) -> Self::V

{
    CursorView {
        cur_va: self.cur_va(),
        mappings: self.view_mappings(),
        phantom: PhantomData,
    }
}
Source§

type V = CursorView<C>

Auto Trait Implementations§

§

impl<'rcu, C> Freeze for CursorOwner<'rcu, C>

§

impl<'rcu, C> !RefUnwindSafe for CursorOwner<'rcu, C>

§

impl<'rcu, C> Send for CursorOwner<'rcu, C>

§

impl<'rcu, C> Sync for CursorOwner<'rcu, C>

§

impl<'rcu, C> Unpin for CursorOwner<'rcu, C>
where <C as PageTableConfig>::E: Unpin, C: Unpin,

§

impl<'rcu, C> UnsafeUnpin for CursorOwner<'rcu, C>

§

impl<'rcu, C> !UnwindSafe for CursorOwner<'rcu, C>

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

§

impl<T, VERUS_SPEC__A> FromSpec<T> for VERUS_SPEC__A
where VERUS_SPEC__A: From<T>,

§

fn obeys_from_spec() -> bool

§

fn from_spec(v: T) -> VERUS_SPEC__A

Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

§

impl<T, VERUS_SPEC__A> IntoSpec<T> for VERUS_SPEC__A
where VERUS_SPEC__A: Into<T>,

§

fn obeys_into_spec() -> bool

§

fn into_spec(self) -> T

§

impl<T, U> IntoSpecImpl<U> for T
where U: From<T>,

§

fn obeys_into_spec() -> bool

§

fn into_spec(self) -> U

Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
§

impl<T, VERUS_SPEC__A> TryFromSpec<T> for VERUS_SPEC__A
where VERUS_SPEC__A: TryFrom<T>,

§

fn obeys_try_from_spec() -> bool

§

fn try_from_spec(v: T) -> Result<VERUS_SPEC__A, <VERUS_SPEC__A as TryFrom<T>>::Error>

Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
§

impl<T, VERUS_SPEC__A> TryIntoSpec<T> for VERUS_SPEC__A
where VERUS_SPEC__A: TryInto<T>,

§

fn obeys_try_into_spec() -> bool

§

fn try_into_spec(self) -> Result<T, <VERUS_SPEC__A as TryInto<T>>::Error>

§

impl<T, U> TryIntoSpecImpl<U> for T
where U: TryFrom<T>,

§

fn obeys_try_into_spec() -> bool

§

fn try_into_spec(self) -> Result<U, <U as TryFrom<T>>::Error>