ostd/specs/mm/page_table/cursor/owners.rs

1use vstd::arithmetic::power2::pow2;
2use vstd::prelude::*;
3use vstd::seq_lib::*;
4use vstd::set::{axiom_set_choose_len, axiom_set_contains_len, axiom_set_intersect_finite};
5
6use vstd_extra::arithmetic::{
7    lemma_nat_align_down_monotone, lemma_nat_align_down_sound, lemma_nat_align_down_within_block,
8    lemma_nat_align_up_sound,
9};
10use vstd_extra::drop_tracking::*;
11use vstd_extra::ghost_tree::*;
12use vstd_extra::ownership::*;
13use vstd_extra::seq_extra::{forall_seq, lemma_forall_seq_index};
14
15use core::marker::PhantomData;
16use core::ops::Range;
17
18use crate::mm::frame::meta::mapping::frame_to_index;
19use crate::mm::page_table::*;
20use crate::mm::page_prop::PageProperty;
21use crate::mm::{nr_subpage_per_huge, Paddr, PagingConstsTrait, PagingLevel, Vaddr};
22use crate::specs::arch::mm::{MAX_USERSPACE_VADDR, NR_ENTRIES, NR_LEVELS, PAGE_SIZE};
23use crate::specs::mm::frame::meta_owners::{REF_COUNT_MAX, REF_COUNT_UNUSED};
24use crate::specs::mm::page_table::cursor::page_size_lemmas::{
25    lemma_page_size_divides, lemma_page_size_ge_page_size, lemma_page_size_spec_level1,
26};
27use crate::specs::arch::paging_consts::PagingConsts;
28use crate::specs::mm::frame::meta_region_owners::MetaRegionOwners;
29use crate::specs::mm::page_table::node::GuardPerm;
30use crate::specs::mm::page_table::owners::*;
31use crate::specs::mm::page_table::view::PageTableView;
32use crate::specs::mm::page_table::AbstractVaddr;
33use crate::specs::mm::page_table::Guards;
34use crate::specs::mm::page_table::Mapping;
35use crate::specs::mm::page_table::{nat_align_down, nat_align_up};
36use crate::specs::task::InAtomicMode;
37
38verus! {
39
/// Tracked ghost state for one suspended level of a page-table cursor walk.
///
/// When the cursor descends into a child node, this level's ownership is
/// parked in a continuation so it can be re-attached on the way back up
/// (see `make_cont` / `restore`).
pub tracked struct CursorContinuation<'rcu, C: PageTableConfig> {
    /// Ownership of the page-table entry backing this level's node.
    pub entry_own: EntryOwner<C>,
    /// Index of the child entry the cursor is currently positioned at.
    pub idx: usize,
    /// Depth of this node in the owner tree; equals `path().len()` per `inv`.
    pub tree_level: nat,
    /// Per-entry child subtrees; `None` marks a slot whose child has been
    /// taken out (via `take_child` / `make_cont`).
    pub children: Seq<Option<OwnerSubtree<C>>>,
    /// Path recorded for this continuation.
    /// NOTE(review): `path()` below reads `entry_own.path`, not this field —
    /// confirm the two are kept in sync by the construction axioms.
    pub path: TreePath<NR_ENTRIES>,
    /// Permission witnessing the guard held on this node.
    pub guard_perm: GuardPerm<'rcu, C>,
}
48
impl<'rcu, C: PageTableConfig> CursorContinuation<'rcu, C> {
    /// The tree path of this node, read from the entry owner.
    /// NOTE(review): reads `entry_own.path`, not the `path` struct field —
    /// confirm the two are kept consistent by the construction axioms.
    pub open spec fn path(self) -> TreePath<NR_ENTRIES> {
        self.entry_own.path
    }

    /// The child subtree at the current index; meaningful only when that
    /// slot is `Some`.
    pub open spec fn child(self) -> OwnerSubtree<C> {
        self.children[self.idx as int].unwrap()
    }

    /// Spec of `take_child`: the child at `idx` paired with the continuation
    /// whose slot at `idx` has been replaced by `None`.
    pub open spec fn take_child_spec(self) -> (OwnerSubtree<C>, Self) {
        let child = self.children[self.idx as int].unwrap();
        let cont = Self {
            // remove-then-insert keeps the sequence length unchanged.
            children: self.children.remove(self.idx as int).insert(self.idx as int, None),
            ..self
        };
        (child, cont)
    }

    /// Moves the child subtree at `idx` out of the continuation, leaving
    /// `None` behind. The returned child satisfies its own invariant.
    #[verifier::returns(proof)]
    pub proof fn take_child(tracked &mut self) -> (res: OwnerSubtree<C>)
        requires
            old(self).inv(),
            old(self).idx < old(self).children.len(),
            old(self).children[old(self).idx as int] is Some,
        ensures
            res == old(self).take_child_spec().0,
            *self == old(self).take_child_spec().1,
            res.inv()
    {
        // Instantiating the per-child invariant establishes `res.inv()`.
        self.inv_children_unroll(self.idx as int);
        let tracked child = self.children.tracked_remove(old(self).idx as int).tracked_unwrap();
        self.children.tracked_insert(old(self).idx as int, None);
        child
    }

    /// Spec of `put_child`: the continuation with `child` stored at `idx`.
    pub open spec fn put_child_spec(self, child: OwnerSubtree<C>) -> Self {
        Self {
            children: self.children.remove(self.idx as int).insert(self.idx as int, Some(child)),
            ..self
        }
    }

    /// Stores `child` into the (currently empty) slot at `idx`.
    #[verifier::returns(proof)]
    pub proof fn put_child(tracked &mut self, tracked child: OwnerSubtree<C>)
        requires
            old(self).idx < old(self).children.len(),
            old(self).children[old(self).idx as int] is None,
        ensures
            *self == old(self).put_child_spec(child)
    {
        // Drop the `None` placeholder, then insert the real child.
        let _ = self.children.tracked_remove(old(self).idx as int);
        self.children.tracked_insert(old(self).idx as int, Some(child));
    }

    /// Taking a child and putting it back yields the original continuation.
    pub proof fn take_put_child(self)
        requires
            self.idx < self.children.len(),
            self.children[self.idx as int] is Some,
        ensures
            self.take_child_spec().1.put_child_spec(self.take_child_spec().0) == self,
    {
        let child = self.take_child_spec().0;
        let cont = self.take_child_spec().1;
        // Sequence extensionality on the round-tripped children.
        assert(cont.put_child_spec(child).children == self.children);
    }

    /// Spec of `make_cont`: descend into the child at the current index,
    /// producing (child continuation, parent continuation with slot cleared).
    pub open spec fn make_cont_spec(self, idx: usize, guard_perm: GuardPerm<'rcu, C>) -> (Self, Self) {
        let child = Self {
            entry_own: self.children[self.idx as int].unwrap().value,
            tree_level: (self.tree_level + 1) as nat,
            idx: idx,
            children: self.children[self.idx as int].unwrap().children,
            // NOTE(review): extends the `path` field, while `path()` reads
            // `entry_own.path` — confirm both stay consistent.
            path: self.path.push_tail(self.idx as usize),
            guard_perm: guard_perm,
        };
        let cont = Self {
            children: self.children.update(self.idx as int, None),
            ..self
        };
        (child, cont)
    }

    /// Descends into the child at the current index, consuming the guard
    /// permission for the child node (trusted; see `make_cont_spec`).
    #[verifier::returns(proof)]
    pub axiom fn make_cont(tracked &mut self, idx: usize, tracked guard_perm: Tracked<GuardPerm<'rcu, C>>) -> (res: Self)
        requires
            old(self).all_some(),
            old(self).idx < NR_ENTRIES,
            idx < NR_ENTRIES,
        ensures
            res == old(self).make_cont_spec(idx, guard_perm@).0,
            *self == old(self).make_cont_spec(idx, guard_perm@).1;

    /// Spec of `restore`: rebuild the child's subtree node, store it back at
    /// `idx`, and release the child's guard permission to the caller.
    pub open spec fn restore_spec(self, child: Self) -> (Self, GuardPerm<'rcu, C>) {
        let child_node = OwnerSubtree {
            value: child.entry_own,
            level: child.tree_level,
            children: child.children,
        };
        (Self { children: self.children.update(self.idx as int, Some(child_node)), ..self }, child.guard_perm)
    }

    /// Pops back up one level: re-attaches `child` under this continuation
    /// and returns its guard permission (trusted; see `restore_spec`).
    #[verifier::returns(proof)]
    pub axiom fn restore(tracked &mut self, tracked child: Self) -> (tracked guard_perm: GuardPerm<'rcu, C>)
        ensures
            *self == old(self).restore_spec(child).0,
            guard_perm == old(self).restore_spec(child).1;

    /// Spec of `new`: a continuation rooted at `owner_subtree` with an empty
    /// path and the given index and guard permission.
    pub open spec fn new_spec(owner_subtree: OwnerSubtree<C>, idx: usize, guard_perm: GuardPerm<'rcu, C>) -> Self {
        Self {
            entry_own: owner_subtree.value,
            idx: idx,
            tree_level: owner_subtree.level,
            children: owner_subtree.children,
            path: TreePath::new(Seq::empty()),
            guard_perm: guard_perm,
        }
    }

    /// Creates the root continuation from an owner subtree (trusted).
    pub axiom fn new(tracked owner_subtree: OwnerSubtree<C>, idx: usize, tracked guard_perm: GuardPerm<'rcu, C>)
    -> (tracked res: Self)
        ensures
            res == Self::new_spec(owner_subtree, idx, guard_perm);

    /// `f` holds as a subtree-wide predicate for every present child, each
    /// rooted at this node's path extended by the child's index.
    pub open spec fn map_children(self, f: spec_fn(EntryOwner<C>, TreePath<NR_ENTRIES>) -> bool) -> bool {
        forall |i: int|
            #![auto]
            0 <= i < self.children.len() ==>
                self.children[i] is Some ==>
                    self.children[i].unwrap().tree_predicate_map(self.path().push_tail(i as usize), f)
    }

    /// Lift map_children(f) to map_children(g) when implies(f, g).
    pub proof fn map_children_lift(
        self,
        f: spec_fn(EntryOwner<C>, TreePath<NR_ENTRIES>) -> bool,
        g: spec_fn(EntryOwner<C>, TreePath<NR_ENTRIES>) -> bool,
    )
        requires
            self.inv(),
            self.map_children(f),
            OwnerSubtree::implies(f, g),
        ensures
            self.map_children(g),
    {
        assert forall|j: int|
            #![auto]
            0 <= j < self.children.len()
                && self.children[j] is Some implies
                self.children[j].unwrap().tree_predicate_map(
                    self.path().push_tail(j as usize), g)
        by {
            // Each child satisfies its invariant, so the subtree-wide
            // implication lemma applies to it.
            self.inv_children_unroll(j);
            OwnerSubtree::map_implies(
                self.children[j].unwrap(),
                self.path().push_tail(j as usize),
                f, g,
            );
        };
    }

    /// Lift map_children from f to g when siblings (j != idx) come from
    /// cont0 which had map_children(f), and the child at idx (if present)
    /// already satisfies g.
    pub proof fn map_children_lift_skip_idx(
        self,
        cont0: Self,
        idx: int,
        f: spec_fn(EntryOwner<C>, TreePath<NR_ENTRIES>) -> bool,
        g: spec_fn(EntryOwner<C>, TreePath<NR_ENTRIES>) -> bool,
    )
        requires
            0 <= idx < NR_ENTRIES,
            OwnerSubtree::implies(f, g),
            cont0.inv(),
            cont0.map_children(f),
            self.path() == cont0.path(),
            self.children.len() == cont0.children.len(),
            forall|j: int| #![auto]
                0 <= j < NR_ENTRIES && j != idx ==>
                self.children[j] == cont0.children[j],
            self.children[idx] is Some ==>
                self.children[idx].unwrap().tree_predicate_map(
                    self.path().push_tail(idx as usize), g),
        ensures
            self.map_children(g),
    {
        assert forall|j: int|
            #![auto]
            0 <= j < self.children.len()
                && self.children[j] is Some implies
                self.children[j].unwrap().tree_predicate_map(
                    self.path().push_tail(j as usize), g)
        by {
            // The idx slot is covered directly by the precondition;
            // siblings are unchanged from cont0, so lift f to g there.
            if j != idx {
                cont0.inv_children_unroll(j);
                OwnerSubtree::map_implies(
                    cont0.children[j].unwrap(),
                    cont0.path().push_tail(j as usize), f, g);
            }
        };
    }

    /// The paging level of the node owned at this continuation.
    pub open spec fn level(self) -> PagingLevel {
        self.entry_own.node.unwrap().level
    }

    /// Every present child subtree satisfies its own invariant.
    pub open spec fn inv_children(self) -> bool {
        self.children.all(
            |child: Option<OwnerSubtree<C>>|
            child is Some ==> child.unwrap().inv()
        )
    }

    /// Instantiates `inv_children` at a single index `i`.
    pub proof fn inv_children_unroll(self, i:int)
        requires
            self.inv_children(),
            0 <= i < self.children.len(),
            self.children[i] is Some
        ensures
            self.children[i].unwrap().inv()
    {
        let pred = |child: Option<OwnerSubtree<C>>| child is Some ==> child.unwrap().inv();
        assert(pred(self.children[i]));
    }

    /// Instantiates `inv_children` at every index at once.
    pub proof fn inv_children_unroll_all(self)
        requires
            self.inv_children()
        ensures
            forall |i:int|
                #![auto]
                0 <= i < self.children.len() ==>
                self.children[i] is Some ==>
                self.children[i].unwrap().inv()
    {
        let pred = |child: Option<OwnerSubtree<C>>| child is Some ==> child.unwrap().inv();
        assert forall |i:int| 0 <= i < self.children.len() && #[trigger] self.children[i] is Some implies
            self.children[i].unwrap().inv()
        by {
            self.inv_children_unroll(i)
        }
    }

    /// Pointwise relation between this node and the child at index `i`:
    /// matching parent level, tree level, out-of-scope flag, `rel_children`
    /// link on entry owners, and path consistency.
    pub open spec fn inv_children_rel_pred(self) -> spec_fn(int, Option<OwnerSubtree<C>>) -> bool {
        |i: int, child: Option<OwnerSubtree<C>>| {
            child is Some ==> {
                &&& child.unwrap().value.parent_level == self.level()
                &&& child.unwrap().level == self.tree_level + 1
                &&& !child.unwrap().value.in_scope
                &&& <EntryOwner<C> as TreeNodeValue<NR_LEVELS>>::rel_children(self.entry_own, i, Some(child.unwrap().value))
                &&& child.unwrap().value.path == self.path().push_tail(i as usize)
            }
        }
    }

    /// `inv_children_rel_pred` holds at every child index.
    pub open spec fn inv_children_rel(self) -> bool {
        forall_seq(self.children, self.inv_children_rel_pred())
    }

    /// Instantiates `inv_children_rel` at a single index `i`.
    pub proof fn inv_children_rel_unroll(self, i: int)
        requires
            self.inv_children_rel(),
            0 <= i < self.children.len(),
            self.children[i] is Some,
        ensures
            self.children[i].unwrap().value.parent_level == self.level(),
            self.children[i].unwrap().level == self.tree_level + 1,
            !self.children[i].unwrap().value.in_scope,
            <EntryOwner<C> as TreeNodeValue<NR_LEVELS>>::rel_children(self.entry_own, i, Some(self.children[i].unwrap().value)),
            self.children[i].unwrap().value.path == self.path().push_tail(i as usize),
    {
        lemma_forall_seq_index(
            self.children, self.inv_children_rel_pred(), i);
    }

    /// Continuation invariant: well-formed children, a valid node entry, a
    /// matching guard permission, and consistent level/path bookkeeping.
    pub open spec fn inv(self) -> bool {
        &&& self.children.len() == NR_ENTRIES
        &&& 0 <= self.idx < NR_ENTRIES
        &&& self.inv_children()
        &&& self.inv_children_rel()
        &&& self.entry_own.is_node()
        &&& self.entry_own.inv()
        &&& !self.entry_own.in_scope
        &&& self.entry_own.node.unwrap().relate_guard_perm(self.guard_perm)
        // Tree depth and paging level are complementary, and the path
        // length tracks the depth.
        &&& self.tree_level == INC_LEVELS - self.level() - 1
        &&& self.tree_level < INC_LEVELS - 1
        &&& self.path().len() == self.tree_level
    }

    /// Every child slot is occupied.
    pub open spec fn all_some(self) -> bool {
        forall|i: int| 0 <= i < NR_ENTRIES ==> self.children[i] is Some
    }

    /// Every child slot except the current index is occupied; the current
    /// slot is empty (its child has been taken out).
    pub open spec fn all_but_index_some(self) -> bool {
        &&& forall|i: int| 0 <= i < self.idx ==> self.children[i] is Some
        &&& forall|i: int| self.idx < i < NR_ENTRIES ==> self.children[i] is Some
        &&& self.children[self.idx as int] is None
    }

    /// Spec of `do_inc_index`: the continuation with `idx` advanced by one.
    pub open spec fn inc_index(self) -> Self {
        Self {
            idx: (self.idx + 1) as usize,
            ..self
        }
    }

    /// Advances the current index by one.
    pub proof fn do_inc_index(tracked &mut self)
        requires
            old(self).idx + 1 < NR_ENTRIES,
        ensures
            *self == old(self).inc_index(),
    {
        self.idx = (self.idx + 1) as usize;
    }

    /// The guard set holds the lock on this continuation's node (identified
    /// by the guard permission's pointer address).
    pub open spec fn node_locked(self, guards: Guards<'rcu, C>) -> bool {
        guards.lock_held(self.guard_perm.value().inner.inner@.ptr.addr())
    }

    /// The union of all mappings viewed by the present children.
    pub open spec fn view_mappings(self) -> Set<Mapping> {
        Set::new(
            |m: Mapping| exists|i:int|
            #![auto]
            0 <= i < self.children.len() &&
                self.children[i] is Some &&
                PageTableOwner(self.children[i].unwrap()).view_rec(self.path().push_tail(i as usize)).contains(m)
        )
    }

    /// Repackages this continuation as an owner subtree.
    pub open spec fn as_subtree(self) -> OwnerSubtree<C> {
        OwnerSubtree {
            value: self.entry_own,
            level: self.tree_level,
            children: self.children,
        }
    }

    /// Repackages this continuation as a page-table owner.
    pub open spec fn as_page_table_owner(self) -> PageTableOwner<C> {
        PageTableOwner(self.as_subtree())
    }

    /// Viewing the continuation as a page-table owner yields exactly
    /// `view_mappings` (when all children are present).
    pub proof fn as_page_table_owner_preserves_view_mappings(self)
        requires
            self.inv(),
            self.all_some(),
        ensures
            self.as_page_table_owner().view_rec(self.path()) == self.view_mappings(),
    {
        self.inv_children_unroll_all();
    }

    /// Restoring a child and putting back its subtree form produce the same
    /// subtree.
    pub proof fn as_subtree_restore(self, child: Self)
        requires
            self.inv(),
            child.inv(),
            self.all_but_index_some(),
            child.all_some(),
        ensures
            self.restore_spec(child).0.as_subtree() ==
            self.put_child_spec(child.as_subtree()).as_subtree(),
    {
        // Both sides update the same slot with the same subtree.
        assert(self.put_child_spec(child.as_subtree()).children ==
        self.children.update(self.idx as int, Some(child.as_subtree())));
    }

    /// The mappings viewed by the child at the current index.
    pub open spec fn view_mappings_take_child_spec(self) -> Set<Mapping> {
        PageTableOwner(self.children[self.idx as int].unwrap()).view_rec(self.path().push_tail(self.idx as usize))
    }

    /// Taking the child at `idx` removes exactly that child's mappings from
    /// the continuation's view.
    pub proof fn view_mappings_take_child(self)
        requires
            self.inv(),
            self.all_some(),
        ensures
            self.take_child_spec().1.view_mappings() == self.view_mappings() - self.view_mappings_take_child_spec()
    {
        self.inv_children_unroll_all();
        let def = self.take_child_spec().1.view_mappings();
        let diff = self.view_mappings() - self.view_mappings_take_child_spec();
        assert forall |m: Mapping| diff.contains(m) implies def.contains(m) by {
            // m comes from some sibling i != idx, which survives the take.
            let i = choose|i: int| 0 <= i < self.children.len() && #[trigger] self.children[i] is Some &&
                PageTableOwner(self.children[i].unwrap()).view_rec(self.path().push_tail(i as usize)).contains(m);
            assert(i != self.idx);
            assert(self.take_child_spec().1.children[i] is Some);
        };
        assert forall |m: Mapping|
        #![trigger def.contains(m)]
        def.contains(m) implies diff.contains(m) by {
            let left = self.take_child_spec().1;
            assert(left.view_mappings().contains(m));
            if self.view_mappings_take_child_spec().contains(m) {
                // m would have to come from both the taken child and some
                // other sibling i != idx; the VA-range lemmas below rule
                // that out because sibling paths cover disjoint ranges.
                assert(PageTableOwner(self.children[self.idx as int].unwrap()).view_rec(self.path().push_tail(self.idx as usize)).contains(m));
                let i = choose|i: int| 0 <= i < left.children.len() && #[trigger] left.children[i] is Some &&
                    PageTableOwner(left.children[i].unwrap()).view_rec(left.path().push_tail(i as usize)).contains(m);
                assert(i != self.idx);
                assert(PageTableOwner(left.children[i as int].unwrap()).view_rec(left.path().push_tail(i as usize)).contains(m));

                PageTableOwner(self.children[self.idx as int].unwrap()).view_rec_vaddr_range(self.path().push_tail(self.idx as usize), m);
                PageTableOwner(left.children[i as int].unwrap()).view_rec_vaddr_range(left.path().push_tail(i as usize), m);

                let size = page_size((INC_LEVELS - self.path().len()) as PagingLevel);

                // Sibling paths have disjoint VA ranges
                sibling_paths_disjoint(self.path(), self.idx, i as usize, size);
                page_size_monotonic((INC_LEVELS - self.path().len() - 1) as PagingLevel, (INC_LEVELS - self.path().len()) as PagingLevel);

            }
        };
    }

    /// Putting `child` into the empty slot at `idx` adds exactly that
    /// child's mappings to the continuation's view.
    pub proof fn view_mappings_put_child(self, child: OwnerSubtree<C>)
        requires
            self.inv(),
            child.inv(),
            self.all_but_index_some(),
        ensures
            self.put_child_spec(child).view_mappings() == self.view_mappings() + PageTableOwner(child).view_rec(self.path().push_tail(self.idx as usize))
    {
        let def = self.put_child_spec(child).view_mappings();
        let sum = self.view_mappings() + PageTableOwner(child).view_rec(self.path().push_tail(self.idx as usize));
        assert forall |m: Mapping| sum.contains(m) implies def.contains(m) by {
            if self.view_mappings().contains(m) {
                // m comes from a sibling that is untouched by the put.
                let i = choose|i: int| 0 <= i < self.children.len() && #[trigger] self.children[i] is Some &&
                    PageTableOwner(self.children[i].unwrap()).view_rec(self.path().push_tail(i as usize)).contains(m);
                assert(i != self.idx);
                assert(self.put_child_spec(child).children[i] == self.children[i]);
            } else {
                // m comes from the newly inserted child itself.
                assert(PageTableOwner(child).view_rec(self.path().push_tail(self.idx as usize)).contains(m));
                assert(self.put_child_spec(child).children[self.idx as int] == Some(child));
            }
        };
    }

    /// Proves `rel_children` from `node_matching`: after taking a child from
    /// a continuation, modifying it (by protect, alloc_if_none, or
    /// split_if_mapped_huge), and restoring `entry_own.node = Some(parent_owner)`,
    /// `rel_children` holds for any `entry_own` with `node == Some(parent_owner)`
    /// and the correct `path`.
    ///
    /// The key inputs are:
    /// - `node_matching` from the operation's postcondition (provides `match_pte`)
    /// - The child's path and path length (preserved by the operation)
    /// - The entry's path (unchanged through the reconstruction)
    pub proof fn rel_children_from_node_matching(
        entry: &Entry<'rcu, C>,
        child_value: EntryOwner<C>,
        parent_owner: NodeOwner<C>,
        guard_perm: GuardPerm<'rcu, C>,
        entry_own: EntryOwner<C>,
        idx: usize,
    )
        requires
            entry.node_matching(child_value, parent_owner, guard_perm),
            entry.idx == idx,
            entry_own.node == Some(parent_owner),
            child_value.path == entry_own.path.push_tail(idx),
            child_value.path.len() == parent_owner.tree_level + 1,
        ensures
            <EntryOwner<C> as TreeNodeValue<NR_LEVELS>>::rel_children(
                entry_own,
                idx as int,
                Some(child_value)),
    { }

    /// After restoring `entry_own.node = Some(parent_owner)` and putting the child back
    /// at `idx`, the continuation invariant holds. The child at `idx` may have been modified
    /// (e.g., by `split_if_mapped_huge` or `protect`) but must satisfy `inv()`, have the
    /// correct path/parent_level/level, satisfy `rel_children`, and have `!in_scope`.
    pub axiom fn continuation_inv_holds_after_child_restore(self)
        requires
            self.entry_own.is_node(),
            self.entry_own.inv(),
            self.entry_own.node.unwrap().relate_guard_perm(self.guard_perm),
            self.children.len() == NR_ENTRIES,
            0 <= self.idx < NR_ENTRIES,
            self.tree_level == INC_LEVELS - self.level() - 1,
            self.tree_level < INC_LEVELS - 1,
            self.path().len() == self.tree_level,
            self.children[self.idx as int] is Some,
            self.children[self.idx as int].unwrap().inv(),
            self.children[self.idx as int].unwrap().value.parent_level == self.level(),
            self.children[self.idx as int].unwrap().value.path == self.path().push_tail(self.idx as usize),
            self.children[self.idx as int].unwrap().level == self.tree_level + 1,
            <EntryOwner<C> as TreeNodeValue<NR_LEVELS>>::rel_children(
                self.entry_own, self.idx as int, Some(self.children[self.idx as int].unwrap().value)),
            !self.children[self.idx as int].unwrap().value.in_scope,
        ensures
            self.inv();

    /// Consumes the meta-slot permission for `paddr` from `regions` and
    /// builds a new single-node frame subtree for the current entry position.
    pub proof fn new_child(tracked &self, paddr: Paddr, prop: PageProperty, tracked regions: &mut MetaRegionOwners) -> (tracked res: OwnerSubtree<C>)
        requires
            self.inv(),
            old(regions).slots.contains_key(frame_to_index(paddr)),
        ensures
            regions.slot_owners == old(regions).slot_owners,
            regions.slots == old(regions).slots.remove(frame_to_index(paddr)),
            res.value == EntryOwner::<C>::new_frame_spec(paddr, self.path().push_tail(self.idx as usize), self.level(), prop, old(regions).slots[frame_to_index(paddr)]),
            res.inv(),
            res.level == self.tree_level + 1,
            res == OwnerSubtree::new_val(res.value, res.level as nat),
    {
        let tracked slot_perm = regions.slots.tracked_remove(frame_to_index(paddr));
        let tracked owner = EntryOwner::<C>::new_frame(paddr, self.path().push_tail(self.idx as usize), self.level(), prop, slot_perm);
        OwnerSubtree::new_val_tracked(owner, self.tree_level + 1)
    }

}
557
/// Tracked ghost state for a whole page-table cursor: the spine of
/// continuations from the locked root down to the current level.
pub tracked struct CursorOwner<'rcu, C: PageTableConfig> {
    /// The paging level the cursor currently points at (1..=NR_LEVELS per `inv`).
    pub level: PagingLevel,
    /// One continuation per tree level; `inv` dictates which keys (0..=3)
    /// must be present depending on `level`.
    pub continuations: Map<int, CursorContinuation<'rcu, C>>,
    /// The cursor's current virtual address in per-level index form.
    pub va: AbstractVaddr,
    /// The level at which the locked (guarded) range is rooted.
    pub guard_level: PagingLevel,
    /// VA prefix shared with `va` above `guard_level`; indices below
    /// `guard_level` are zero (see `inv`).
    pub prefix: AbstractVaddr,
    /// Whether the cursor has popped out of the guard range after reaching
    /// the end of the locked range (see `inv` for the allowed states).
    pub popped_too_high: bool,
}
566
impl<'rcu, C: PageTableConfig> Inv for CursorOwner<'rcu, C> {
    /// Cursor invariant: VA well-formedness, level bounds, per-level
    /// continuation invariants, pairwise-distinct guard addresses, and path
    /// consistency down the spine of held continuations.
    open spec fn inv(self) -> bool {
        &&& self.va.inv()
        &&& 1 <= self.level <= NR_LEVELS
        &&& self.guard_level <= NR_LEVELS
        // The top-level index of the cursor's VA must be within the page table config's
        // managed range. This ensures cursors for UserPtConfig and KernelPtConfig operate
        // on disjoint portions of the virtual address space.
        &&& C::TOP_LEVEL_INDEX_RANGE_spec().start <= self.va.index[NR_LEVELS - 1]
        &&& self.va.index[NR_LEVELS - 1] < C::TOP_LEVEL_INDEX_RANGE_spec().end
        // The cursor's VA is always at or above the start of the locked range.
        &&& self.in_locked_range() || self.above_locked_range()
        // The cursor is allowed to pop out of the guard range only when it reaches the end of the locked range.
        // This allows the user to reason solely about the current vaddr and not keep track of the cursor's level.
        &&& self.popped_too_high ==> self.level >= self.guard_level && self.in_locked_range()
        &&& !self.popped_too_high ==> self.level < self.guard_level || self.above_locked_range()
        // The continuation at the current level still has every child;
        // continuations above it each miss exactly the child descended into.
        &&& self.continuations[self.level - 1].all_some()
        &&& forall|i: int| self.level <= i < NR_LEVELS ==> {
            (#[trigger] self.continuations[i]).all_but_index_some()
        }
        &&& self.prefix.inv()
        &&& forall|i: int| i < self.guard_level ==> self.prefix.index[i] == 0
        // The cursor's VA shares upper indices with the prefix: as long as
        // the cursor hasn't popped above guard_level (level <= guard_level),
        // only indices below guard_level change.
        &&& self.level <= self.guard_level ==> forall|i: int| self.guard_level <= i < NR_LEVELS ==>
            self.va.index[i] == self.prefix.index[i]
        // Per-level conjuncts: a continuation is held for every level the
        // cursor has descended past, with pairwise-distinct guard addresses.
        &&& self.level <= 4 ==> {
            &&& self.continuations.contains_key(3)
            &&& self.continuations[3].inv()
            &&& self.continuations[3].level() == 4
            // Obviously there is no level 5 pt, but that would be the level of the parent of the root pt.
            &&& self.continuations[3].entry_own.parent_level == 5
            &&& self.va.index[3] == self.continuations[3].idx
        }
        &&& self.level <= 3 ==> {
            &&& self.continuations.contains_key(2)
            &&& self.continuations[2].inv()
            &&& self.continuations[2].level() == 3
            &&& self.continuations[2].entry_own.parent_level == 4
            &&& self.va.index[2] == self.continuations[2].idx
            &&& self.continuations[2].guard_perm.value().inner.inner@.ptr.addr() !=
                self.continuations[3].guard_perm.value().inner.inner@.ptr.addr()
            // Path consistency: child path = parent path pushed with parent's index
            &&& self.continuations[2].path() == self.continuations[3].path().push_tail(self.continuations[3].idx as usize)
        }
        &&& self.level <= 2 ==> {
            &&& self.continuations.contains_key(1)
            &&& self.continuations[1].inv()
            &&& self.continuations[1].level() == 2
            &&& self.continuations[1].entry_own.parent_level == 3
            &&& self.va.index[1] == self.continuations[1].idx
            &&& self.continuations[1].guard_perm.value().inner.inner@.ptr.addr() !=
                self.continuations[2].guard_perm.value().inner.inner@.ptr.addr()
            &&& self.continuations[1].guard_perm.value().inner.inner@.ptr.addr() !=
                self.continuations[3].guard_perm.value().inner.inner@.ptr.addr()
            // Path consistency: child path = parent path pushed with parent's index
            &&& self.continuations[1].path() == self.continuations[2].path().push_tail(self.continuations[2].idx as usize)
        }
        &&& self.level == 1 ==> {
            &&& self.continuations.contains_key(0)
            &&& self.continuations[0].inv()
            &&& self.continuations[0].level() == 1
            &&& self.continuations[0].entry_own.parent_level == 2
            &&& self.va.index[0] == self.continuations[0].idx
            &&& self.continuations[0].guard_perm.value().inner.inner@.ptr.addr() !=
                self.continuations[1].guard_perm.value().inner.inner@.ptr.addr()
            &&& self.continuations[0].guard_perm.value().inner.inner@.ptr.addr() !=
                self.continuations[2].guard_perm.value().inner.inner@.ptr.addr()
            &&& self.continuations[0].guard_perm.value().inner.inner@.ptr.addr() !=
                self.continuations[3].guard_perm.value().inner.inner@.ptr.addr()
            // Path consistency: child path = parent path pushed with parent's index
            &&& self.continuations[0].path() == self.continuations[1].path().push_tail(self.continuations[1].idx as usize)
        }
    }
}
643
644impl<'rcu, C: PageTableConfig> CursorOwner<'rcu, C> {
645
    /// Entry predicate: a node entry's metadata address is unlocked in
    /// `guards` (non-node entries satisfy it vacuously).
    pub open spec fn node_unlocked(guards: Guards<'rcu, C>) ->
        (spec_fn(EntryOwner<C>, TreePath<NR_ENTRIES>) -> bool) {
        |owner: EntryOwner<C>, path: TreePath<NR_ENTRIES>|
            owner.is_node() ==>
            guards.unlocked(owner.node.unwrap().meta_perm.addr())
    }
652
    /// Like `node_unlocked`, but the node whose metadata address equals
    /// `addr` is exempt from the unlocked requirement.
    pub open spec fn node_unlocked_except(guards: Guards<'rcu, C>, addr: usize) ->
        (spec_fn(EntryOwner<C>, TreePath<NR_ENTRIES>) -> bool) {
        |owner: EntryOwner<C>, path: TreePath<NR_ENTRIES>|
            owner.is_node() ==>
            owner.node.unwrap().meta_perm.addr() != addr ==>
            guards.unlocked(owner.node.unwrap().meta_perm.addr())
    }
660
    /// `f` holds for the children of every continuation at or above the
    /// current level.
    /// NOTE(review): this body is currently identical to `map_only_children`
    /// — confirm whether "full tree" was meant to also cover the entry
    /// owners themselves, or deduplicate the two definitions.
    pub open spec fn map_full_tree(self, f: spec_fn(EntryOwner<C>, TreePath<NR_ENTRIES>) -> bool) -> bool {
        forall|i: int|
            #![trigger self.continuations[i]]
            self.level - 1 <= i < NR_LEVELS ==> {
                self.continuations[i].map_children(f)
            }
    }
668
    /// `f` holds for the children of every continuation at or above the
    /// current level (the entry owners themselves are not covered).
    pub open spec fn map_only_children(self, f: spec_fn(EntryOwner<C>, TreePath<NR_ENTRIES>) -> bool) -> bool {
        forall|i: int|
            #![trigger self.continuations[i]]
            self.level - 1 <= i < NR_LEVELS ==>
            self.continuations[i].map_children(f)
    }
675
    /// No node owned under any held continuation's children is locked in
    /// `guards`.
    pub open spec fn children_not_locked(self, guards: Guards<'rcu, C>) -> bool {
        self.map_only_children(Self::node_unlocked(guards))
    }
679
    /// Every child-owned node is unlocked in `guards`, except possibly the
    /// current entry's own node.
    pub open spec fn only_current_locked(self, guards: Guards<'rcu, C>) -> bool {
        self.map_only_children(Self::node_unlocked_except(guards, self.cur_entry_owner().node.unwrap().meta_perm.addr()))
    }
683
    /// When the tracked drop of `guard` takes `guards0` to `guards1`
    /// (per `TrackDrop`'s constructor contract), a cursor whose only locked
    /// child node was the current one ends up with no locked children.
    pub proof fn never_drop_restores_children_not_locked(
        self,
        guard: PageTableGuard<'rcu, C>,
        guards0: Guards<'rcu, C>,
        guards1: Guards<'rcu, C>)
    requires
        self.inv(),
        self.only_current_locked(guards0),
        <PageTableGuard<'rcu, C> as TrackDrop>::constructor_requires(guard, guards0),
        <PageTableGuard<'rcu, C> as TrackDrop>::constructor_ensures(guard, guards0, guards1),
    ensures
        self.children_not_locked(guards1),
    {
        // TODO(review): proof is admitted; either discharge it or convert it
        // to an axiom with a safety justification like the neighboring
        // `never_drop_restores_nodes_locked`.
        admit()
    }
699
    /// After dropping the guard for the popped level, `nodes_locked` is preserved
    /// for the new (higher-level) owner, because the dropped guard's address is not
    /// among those checked by `nodes_locked` (which covers levels >= self.level - 1).
    ///
    /// The final precondition makes this disjointness explicit: the dropped
    /// guard's pointer address differs from the guard pointer of every tracked
    /// continuation, so the drop cannot unlock any node `nodes_locked` tracks.
    pub axiom fn never_drop_restores_nodes_locked(
        self,
        guard: PageTableGuard<'rcu, C>,
        guards0: Guards<'rcu, C>,
        guards1: Guards<'rcu, C>,
    )
        requires
            self.inv(),
            self.nodes_locked(guards0),
            <PageTableGuard<'rcu, C> as TrackDrop>::constructor_requires(guard, guards0),
            <PageTableGuard<'rcu, C> as TrackDrop>::constructor_ensures(guard, guards0, guards1),
            forall|i: int| #![trigger self.continuations[i]]
                self.level - 1 <= i < NR_LEVELS ==>
                    self.continuations[i].guard_perm.value().inner.inner@.ptr.addr()
                        != guard.inner.inner@.ptr.addr(),
        ensures
            self.nodes_locked(guards1);
720
    /// After a `protect` operation that only modifies `frame.prop` of the current entry,
    /// `CursorOwner::inv()` and `relate_region` are preserved.
    ///
    /// Safety: `protect` changes only `frame.prop` and updates `parent.children_perm` to match.
    /// `EntryOwner::inv()` is preserved (from protect postcondition).
    /// `relate_region` is preserved because it doesn't use `frame.prop`.
    /// `rel_children` holds via `match_pte` (from protect's `wf`/`node_matching` postconditions).
    ///
    /// The axiom requires only the semantic properties of the modified entry that are
    /// checked by `inv` and `relate_region`; the structural identity of other continuations
    /// is trusted to hold from the tracked restore operations in the caller.
    ///
    /// `self` is the pre-protect cursor, `other` the post-protect cursor.
    /// TODO(verification): the proof body is currently `admit()`ed.
    pub proof fn protect_preserves_cursor_inv_relate(
        self,
        other: Self,
        regions: MetaRegionOwners,
    )
        requires
            self.inv(),
            self.relate_region(regions),
            self.cur_entry_owner().is_frame(),
            other.cur_entry_owner().is_frame(),
            other.cur_entry_owner().inv(),
            // protect preserves PA, slot_perm, path, parent_level
            other.cur_entry_owner().frame.unwrap().mapped_pa == self.cur_entry_owner().frame.unwrap().mapped_pa,
            other.cur_entry_owner().frame.unwrap().slot_perm == self.cur_entry_owner().frame.unwrap().slot_perm,
            other.cur_entry_owner().path == self.cur_entry_owner().path,
            other.cur_entry_owner().parent_level == self.cur_entry_owner().parent_level,
            // cursor level and structural fields unchanged
            self.level == other.level,
            self.guard_level == other.guard_level,
            self.va == other.va,
            self.prefix == other.prefix,
            self.popped_too_high == other.popped_too_high,
            // higher-level continuations unchanged
            forall|i: int| self.level <= i < NR_LEVELS ==>
                #[trigger] self.continuations[i] == other.continuations[i],
            // bottom continuation well-formed after protect
            other.continuations[self.level - 1].inv(),
            other.continuations[self.level - 1].all_some(),
        ensures
            other.inv(),
            other.relate_region(regions)
    { admit() }
764
    /// Monotonicity of `map_children` over the tracked continuations: if `f`
    /// pointwise implies `g` (`OwnerSubtree::implies`), then every
    /// continuation satisfying `map_children(f)` also satisfies
    /// `map_children(g)`.
    pub proof fn map_children_implies(
        self,
        f: spec_fn(EntryOwner<C>, TreePath<NR_ENTRIES>) -> bool,
        g: spec_fn(EntryOwner<C>, TreePath<NR_ENTRIES>) -> bool,
    )
    requires
        self.inv(),
        OwnerSubtree::implies(f, g),
        forall|i: int|
            #![trigger self.continuations[i]]
                self.level - 1 <= i < NR_LEVELS ==>
                    self.continuations[i].map_children(f),
    ensures
        forall|i: int|
            #![trigger self.continuations[i]]
            self.level - 1 <= i < NR_LEVELS ==>
                self.continuations[i].map_children(g),
    {
        assert forall|i: int|
            #![trigger self.continuations[i]]
            self.level - 1 <= i < NR_LEVELS implies self.continuations[i].map_children(g) by {
                let cont = self.continuations[i];
                // Expose the per-child tree predicates hidden behind the
                // opaque `inv_children`.
                reveal(CursorContinuation::inv_children);
                assert forall|j: int|
                    #![trigger cont.children[j]]
                    0 <= j < cont.children.len() && cont.children[j] is Some
                        implies cont.children[j].unwrap().tree_predicate_map(cont.path().push_tail(j as usize), g) by {
                            cont.inv_children_unroll(j);
                            // Lift the pointwise implication through the subtree map.
                            OwnerSubtree::map_implies(cont.children[j].unwrap(), cont.path().push_tail(j as usize), f, g);
                    }
            }
    }
797
    /// Every tracked continuation at levels `self.level - 1 .. NR_LEVELS`
    /// currently holds its node's lock (per `CursorContinuation::node_locked`).
    pub open spec fn nodes_locked(self, guards: Guards<'rcu, C>) -> bool {
        forall|i: int|
            #![trigger self.continuations[i]]
            self.level - 1 <= i < NR_LEVELS ==> {
                self.continuations[i].node_locked(guards)
            }
    }
805
    /// The cursor's slot index within the page-table node at its current level.
    pub open spec fn index(self) -> usize {
        self.continuations[self.level - 1].idx
    }
809
    /// Ghost step to the next slot at the current level: bumps the bottom
    /// continuation's index, mirrors the new index into `va.index` at the same
    /// level, and clears `popped_too_high`.  Range checks are the caller's
    /// responsibility (see `do_inc_index`'s preconditions).
    pub open spec fn inc_index(self) -> Self {
        Self {
            continuations: self.continuations.insert(self.level - 1, self.continuations[self.level - 1].inc_index()),
            va: AbstractVaddr {
                offset: self.va.offset,
                index: self.va.index.insert(self.level - 1, self.continuations[self.level - 1].inc_index().idx as int)
            },
            popped_too_high: false,
            ..self
        }
    }
821
    /// Recursively zeroes the VA index at every level in `level .. self.level`
    /// (exclusive of `self.level`), working upward from `level`.  Indices at
    /// or above `self.level` are untouched
    /// (see `zero_below_level_rec_preserves_above`).
    pub open spec fn zero_below_level_rec(self, level: PagingLevel) -> Self
        decreases self.level - level
    {
        if self.level <= level {
            self
        } else {
            Self {
                va: AbstractVaddr {
                    offset: self.va.offset,
                    index: self.va.index.insert(level - 1, 0)
                },
                ..self.zero_below_level_rec((level + 1) as u8)
            }
        }
    }
837
    /// Zeroes all VA indices strictly below the cursor's current level
    /// (starts the recursion at level 1).
    pub open spec fn zero_below_level(self) -> Self {
        self.zero_below_level_rec(1u8)
    }
841
    /// `zero_below_level_rec` never modifies `va.index` at any level at or
    /// above `self.level`.  Proof by induction on `self.level - level`.
    pub proof fn zero_below_level_rec_preserves_above(self, level: PagingLevel)
        ensures
            forall |lv: int| lv >= self.level ==>  self.zero_below_level_rec(level).va.index[lv] == #[trigger] self.va.index[lv],
        decreases self.level - level,
    {
        if self.level > level {
            self.zero_below_level_rec_preserves_above((level + 1) as u8);
        }
    }
851
    /// Corollary of `zero_below_level_rec_preserves_above` for the full
    /// `zero_below_level` (recursion started at level 1).
    pub proof fn zero_preserves_above(self)
        ensures
            forall |lv: int| lv >= self.level ==> self.zero_below_level().va.index[lv] == #[trigger] self.va.index[lv],
    {
        self.zero_below_level_rec_preserves_above(1u8);
    }
858
    /// Tracked in-place mutation realizing the ghost `zero_below_level`
    /// transformation.  Axiom: the tracked update is trusted to coincide with
    /// the spec-level result.
    pub axiom fn do_zero_below_level(tracked &mut self)
        requires
            old(self).inv(),
        ensures
            *self == old(self).zero_below_level();
864
    /// Tracked step to the next slot at the current level, matching the ghost
    /// `inc_index`.  Preconditions keep the incremented index below
    /// `NR_ENTRIES`, and — at the root level — below the configured top-level
    /// index range end.
    pub proof fn do_inc_index(tracked &mut self)
        requires
            old(self).inv(),
            old(self).continuations[old(self).level - 1].idx + 1 < NR_ENTRIES,
            old(self).level == NR_LEVELS ==>
                (old(self).continuations[old(self).level - 1].idx + 1) < C::TOP_LEVEL_INDEX_RANGE_spec().end,
        ensures
            self.inv(),
            *self == old(self).inc_index(),
    {
        reveal(CursorContinuation::inv_children);
        self.popped_too_high = false;
        // Temporarily remove the bottom continuation so it can be mutated.
        let tracked mut cont = self.continuations.tracked_remove(self.level - 1);
        if self.level >= self.guard_level {
            let ghost size = self.locked_range().end - self.locked_range().start;
            let ghost new_va = AbstractVaddr {
                offset: self.va.offset,
                index: self.va.index.insert(self.level - 1, cont.idx + 1)
            };
            // TODO(verification): VA arithmetic for the locked-range step is admitted.
            assert(new_va.to_vaddr() == self.va.to_vaddr() + size) by { admit() };
        }

        cont.do_inc_index();
        // Mirror the new index into `va.index` and reinstall the continuation.
        self.va.index.tracked_insert(self.level - 1, cont.idx as int);
        self.continuations.tracked_insert(self.level - 1, cont);
        assert(self.continuations == old(self).continuations.insert(self.level - 1, cont));
        // TODO(verification): locked-range membership after the step is admitted.
        assert(self.in_locked_range() || self.above_locked_range()) by { admit() };
    }
893
    /// Advancing the index and then zeroing all lower-level indices yields a
    /// strictly larger virtual address.
    /// TODO(verification): the proof body is currently `admit()`ed.
    pub proof fn inc_and_zero_increases_va(self)
        requires
            self.inv()
        ensures
            self.inc_index().zero_below_level().va.to_vaddr() > self.va.to_vaddr(),
    {
        admit()
    }
902
    /// `zero_below_level_rec` modifies only `va`; `level`, `continuations`,
    /// `guard_level`, `prefix`, and `popped_too_high` are all preserved.
    /// Proof by induction on `self.level - level`.
    pub proof fn zero_rec_preserves_all_but_va(self, level: PagingLevel)
        ensures
            self.zero_below_level_rec(level).level == self.level,
            self.zero_below_level_rec(level).continuations == self.continuations,
            self.zero_below_level_rec(level).guard_level == self.guard_level,
            self.zero_below_level_rec(level).prefix == self.prefix,
            self.zero_below_level_rec(level).popped_too_high == self.popped_too_high,
        decreases self.level - level,
    {
        if self.level > level {
            self.zero_rec_preserves_all_but_va((level + 1) as u8);
        }
    }
916
    /// Corollary of `zero_rec_preserves_all_but_va` for the full
    /// `zero_below_level` (recursion started at level 1).
    pub proof fn zero_preserves_all_but_va(self)
        ensures
            self.zero_below_level().level == self.level,
            self.zero_below_level().continuations == self.continuations,
            self.zero_below_level().guard_level == self.guard_level,
            self.zero_below_level().prefix == self.prefix,
            self.zero_below_level().popped_too_high == self.popped_too_high,
    {
        self.zero_rec_preserves_all_but_va(1u8);
    }
927
    /// The concrete virtual address the cursor currently points at.
    pub open spec fn cur_va(self) -> Vaddr {
        self.va.to_vaddr()
    }
931
    /// The VA range of the entry at the cursor's current level: `va` aligned
    /// down (start) and up (end) to that level's granularity.
    pub open spec fn cur_va_range(self) -> Range<AbstractVaddr> {
        let start = self.va.align_down(self.level as int);
        let end = self.va.align_up(self.level as int);
        Range { start, end }
    }
937
    /// The endpoints of `cur_va_range` reflect (`AbstractVaddr::reflect`) the
    /// endpoints of the abstract view's `query_range`.
    /// TODO(verification): the proof body is currently `admit()`ed.
    pub proof fn cur_va_range_reflects_view(self)
        requires
            self.inv(),
        ensures
            self.cur_va_range().start.reflect(self@.query_range().start),
            self.cur_va_range().end.reflect(self@.query_range().end),
    {
        admit()
    }
947
    /// The current virtual address falls within the VA range of the current subtree's path.
    pub proof fn cur_va_in_subtree_range(self)
        requires
            self.inv(),
        ensures
            vaddr(self.cur_subtree().value.path) <= self.cur_va() < vaddr(self.cur_subtree().value.path) + page_size(self.level as PagingLevel)
    {
        let L = self.level as int;
        let cont = self.continuations[L - 1];
        // Path of the current child slot vs. the path derived from `va`.
        let subtree_path = cont.path().push_tail(cont.idx as usize);
        let va_path = self.va.to_path(L - 1);

        self.va.to_path_len(L - 1);
        cont.path().push_tail_property_len(cont.idx as usize);
        // Exhaustive case split on the concrete level so the solver can
        // evaluate `cont.level()` definitionally at each value.
        assert(cont.level() == self.level) by {
            if L == 1 {} else if L == 2 {} else if L == 3 {} else {}
        };

        // Show the two paths agree index-by-index by unrolling the
        // `push_tail` index facts of every continuation above this level.
        assert forall|i: int| 0 <= i < subtree_path.len() implies
            subtree_path.index(i) == va_path.index(i) by {
            self.va.to_path_index(L - 1, i);
            if L == 4 {
                cont.path().push_tail_property_index(cont.idx as usize);
            } else if L == 3 {
                cont.path().push_tail_property_index(cont.idx as usize);
                self.continuations[3].path().push_tail_property_index(self.continuations[3].idx as usize);
            } else if L == 2 {
                cont.path().push_tail_property_index(cont.idx as usize);
                self.continuations[2].path().push_tail_property_index(self.continuations[2].idx as usize);
                self.continuations[3].path().push_tail_property_index(self.continuations[3].idx as usize);
            } else {
                cont.path().push_tail_property_index(cont.idx as usize);
                self.continuations[1].path().push_tail_property_index(self.continuations[1].idx as usize);
                self.continuations[2].path().push_tail_property_index(self.continuations[2].idx as usize);
                self.continuations[3].path().push_tail_property_index(self.continuations[3].idx as usize);
            }
        };

        // Equal indices give equal base vaddrs; bound `cur_va` by the
        // page-size window derived from the path.
        self.va.to_path_inv(L - 1);
        self.cur_subtree_inv();
        AbstractVaddr::rec_vaddr_eq_if_indices_eq(subtree_path, va_path, 0);
        self.va.vaddr_range_from_path(L - 1);
    }
991
    /// The current subtree's mappings equal the filter over [cur_va, cur_va + page_size(level)).
    /// TODO(verification): both set inclusions are currently `admit()`ed; only
    /// the final extensional equality step is proved.
    pub proof fn cur_subtree_eq_filtered_mappings(self)
        requires
            self.inv(),
        ensures
            PageTableOwner(self.cur_subtree())@.mappings ==
                self@.mappings.filter(|m: Mapping| self@.cur_va <= m.va_range.start < self@.cur_va + page_size(self.level)),
    {
        let cur_subtree = self.cur_subtree();
        let cur_path = cur_subtree.value.path;
        let cur_va = self.cur_va();
        let size = page_size(self.level);

        let subtree_mappings = PageTableOwner(cur_subtree)@.mappings;
        let filtered = self@.mappings.filter(|m: Mapping| cur_va <= m.va_range.start < cur_va + size);

        // Forward inclusion: subtree mappings start within the slot window.
        assert forall |m: Mapping| subtree_mappings.contains(m) implies filtered.contains(m) by {
            admit();
        };
        // Backward inclusion: mappings in the window come from this subtree.
        assert forall |m: Mapping| filtered.contains(m) implies subtree_mappings.contains(m) by {
            admit();
        };

        assert(subtree_mappings =~= filtered);
    }
1017
    /// Subtrees at different indices have disjoint VA ranges.
    /// Concretely: for a sibling slot `j != self.index()`, `cur_va` lies
    /// entirely before or entirely after sibling `j`'s VA window.
    /// TODO(verification): the proof body is currently `admit()`ed.
    pub proof fn subtree_va_ranges_disjoint(self, j: int)
        requires
            self.inv(),
            0 <= j < NR_ENTRIES,
            j != self.index(),
            self.continuations[self.level - 1].children[j] is Some,
        ensures
            vaddr(self.continuations[self.level - 1].path().push_tail(j as usize)) + page_size(self.level as PagingLevel) <= self.cur_va()
            || self.cur_va() < vaddr(self.continuations[self.level - 1].path().push_tail(j as usize))
    { admit() }
1029
    /// Children of higher-level continuations have VA ranges that don't include cur_va,
    /// because cur_va's indices at those levels match the path to the current position.
    /// TODO(verification): the proof body is currently `admit()`ed.
    pub proof fn higher_level_children_disjoint(self, i: int, j: int)
        requires
            self.inv(),
            self.level - 1 < i < NR_LEVELS,
            0 <= j < NR_ENTRIES,
            j != self.continuations[i].idx,
            self.continuations[i].children[j] is Some,
        ensures
            vaddr(self.continuations[i].path().push_tail(j as usize)) + page_size((i + 1) as PagingLevel) <= self.cur_va()
            || self.cur_va() < vaddr(self.continuations[i].path().push_tail(j as usize))
    { admit() }
1043
    /// Any mapping that covers cur_va must come from the current subtree.
    /// This follows from the disjointness of VA ranges and the fact that
    /// cur_va falls within the current subtree's VA range.
    pub proof fn mapping_covering_cur_va_from_cur_subtree(self, m: Mapping)
        requires
            self.inv(),
            self.view_mappings().contains(m),
            m.va_range.start <= self.cur_va() < m.va_range.end,
        ensures
            PageTableOwner(self.cur_subtree()).view_rec(self.cur_subtree().value.path).contains(m),
    {
        let cur_va = self.cur_va();

        // m comes from some continuation level i
        let i = choose|i: int| self.level - 1 <= i < NR_LEVELS
            && #[trigger] self.continuations[i].view_mappings().contains(m);
        self.inv_continuation(i);

        // ... and, within that continuation, from some child slot j.
        let cont_i = self.continuations[i];
        let j = choose|j: int| #![auto] 0 <= j < NR_ENTRIES
            && cont_i.children[j] is Some
            && PageTableOwner(cont_i.children[j].unwrap())
                .view_rec(cont_i.path().push_tail(j as usize)).contains(m);

        cont_i.inv_children_unroll(j);
        let child_j = cont_i.children[j].unwrap();
        let path_j = cont_i.path().push_tail(j as usize);
        // Bound m's VA range by child j's slot window.
        PageTableOwner(child_j).view_rec_vaddr_range(path_j, m);

        // Unless (i, j) is exactly the current position, child j's window is
        // disjoint from cur_va — contradicting that m covers cur_va.
        if i == self.level - 1 {
            if j as usize != self.index() {
                self.subtree_va_ranges_disjoint(j);
            }
        } else {
            if j as usize != cont_i.idx as usize {
                self.higher_level_children_disjoint(i, j);
            } else {
                // The descended-into slot has no child stored in the
                // continuation, so this case is impossible.
                assert(cont_i.children[cont_i.idx as int] is None);
                assert(false);
            }
        }
    }
1086
    /// Unpacks `self.inv()` for a single continuation level `i`: the map
    /// contains key `i`, that continuation satisfies its invariant, and its
    /// children sequence has the full `NR_ENTRIES` length.
    pub proof fn inv_continuation(self, i: int)
        requires
            self.inv(),
            self.level - 1 <= i <= NR_LEVELS - 1,
        ensures
            self.continuations.contains_key(i),
            self.continuations[i].inv(),
            self.continuations[i].children.len() == NR_ENTRIES,
    {
        assert(self.continuations.contains_key(i));
    }
1098
    /// The union of the mappings viewed by all tracked continuations
    /// (levels `self.level - 1 .. NR_LEVELS`).
    pub open spec fn view_mappings(self) -> Set<Mapping>
    {
        Set::new(
            |m: Mapping|
            exists|i:int|
            #![trigger self.continuations[i]]
            self.level - 1 <= i < NR_LEVELS && self.continuations[i].view_mappings().contains(m)
        )
    }
1108
    /// Removing the bottom continuation removes exactly its mappings from the
    /// combined view.
    /// TODO(verification): the proof body is currently `admit()`ed.
    pub proof fn view_mappings_take_lowest(self, new: Self)
        requires
            self.inv(),
            new.continuations == self.continuations.remove(self.level - 1),
        ensures
            new.view_mappings() == self.view_mappings() - self.continuations[self.level - 1].view_mappings(),
    {
        admit()
    }
1118
    /// Installing `cont` as the bottom continuation adds exactly `cont`'s
    /// mappings to the combined view.
    /// TODO(verification): the proof body is currently `admit()`ed.
    pub proof fn view_mappings_put_lowest(self, new: Self, cont: CursorContinuation<C>)
        requires
            cont.inv(),
            new.inv(),
            new.continuations == self.continuations.insert(self.level - 1, cont),
        ensures
            new.view_mappings() == self.view_mappings() + cont.view_mappings(),
    {
        admit()
    }
1129
    /// Reassembles the full page-table owner from the cursor state by
    /// restoring continuations bottom-up: starting from the continuation at
    /// the cursor's current level, each higher continuation is restored
    /// (`restore_spec`) on top, until the root continuation (index 3,
    /// i.e. level NR_LEVELS) yields the whole owner.
    pub open spec fn as_page_table_owner(self) -> PageTableOwner<C> {
        if self.level == 1 {
            let l1 = self.continuations[0];
            let l2 = self.continuations[1].restore_spec(l1).0;
            let l3 = self.continuations[2].restore_spec(l2).0;
            let l4 = self.continuations[3].restore_spec(l3).0;
            l4.as_page_table_owner()
        } else if self.level == 2 {
            let l2 = self.continuations[1];
            let l3 = self.continuations[2].restore_spec(l2).0;
            let l4 = self.continuations[3].restore_spec(l3).0;
            l4.as_page_table_owner()
        } else if self.level == 3 {
            let l3 = self.continuations[2];
            let l4 = self.continuations[3].restore_spec(l3).0;
            l4.as_page_table_owner()
        } else {
            let l4 = self.continuations[3];
            l4.as_page_table_owner()
        }
    }
1151
    /// Restoring the full owner preserves the combined view: its `view_rec`
    /// at the root continuation's path equals `view_mappings()`.
    /// TODO(verification): the proof body is currently `admit()`ed.
    pub proof fn as_page_table_owner_preserves_view_mappings(self)
        requires
            self.inv(),
        ensures
            self.as_page_table_owner().view_rec(self.continuations[3].path()) == self.view_mappings(),
    {
        admit()
    }
1160
    /// The entry owner at the cursor's current position (root value of the
    /// current subtree).
    pub open spec fn cur_entry_owner(self) -> EntryOwner<C> {
        self.cur_subtree().value
    }
1164
    /// The owner subtree stored in the cursor's current (level, index) slot.
    /// Well-defined under `inv()`, which guarantees the slot is `Some`.
    pub open spec fn cur_subtree(self) -> OwnerSubtree<C> {
        self.continuations[self.level - 1].children[self.index() as int].unwrap()
    }
1168
    /// Borrows the slot permission from the current frame entry owner.
    ///
    /// This is an axiom because expressing the structural borrow through the
    /// nested Map/Seq/Option layers is not yet supported directly in Verus.
    /// The axiom is safe: it only provides a shared reference to data already
    /// logically owned by `self`, and the borrow cannot outlive `self`.
    ///
    /// Requires the current entry to be a frame; the result equals that
    /// frame's `slot_perm`.
    pub axiom fn borrow_cur_frame_slot_perm(tracked &self) -> (tracked res: &vstd::simple_pptr::PointsTo<crate::mm::frame::meta::MetaSlot>)
        requires
            self.cur_entry_owner().is_frame(),
        ensures
            *res == self.cur_entry_owner().frame.unwrap().slot_perm;
1180
    /// Axiom: the item reconstructed from the current frame's physical address satisfies `clone_requires`.
    ///
    /// Safety: When `relate_region` holds for a frame entry, the item reconstructed via
    /// `item_from_raw_spec(pa, ...)` is the original frame item.  The frame's slot permission
    /// (owned by the cursor) has the correct address, is initialised, and its ref count is in the
    /// valid clonable range (> 0, < REF_COUNT_MAX), so `clone_requires` is satisfied.
    ///
    /// TODO(verification): the proof body is currently `admit()`ed.
    pub proof fn cur_frame_clone_requires(
        self,
        item: C::Item,
        pa: Paddr,
        level: PagingLevel,
        prop: PageProperty,
        regions: MetaRegionOwners,
    )
        requires
            self.inv(),
            self.relate_region(regions),
            self.cur_entry_owner().is_frame(),
            pa == self.cur_entry_owner().frame.unwrap().mapped_pa,
            C::item_from_raw_spec(pa, level, prop) == item,
        ensures
            item.clone_requires(
                self.cur_entry_owner().frame.unwrap().slot_perm,
                regions.slot_owners[frame_to_index(pa)].inner_perms.ref_count,
            )
    { admit() }
1207
    /// Incrementing the ref count of the current frame preserves `regions.inv()` and
    /// `self.relate_region(new_regions)`.
    ///
    /// The preconditions pin down the frame update exactly: only the ref count
    /// at slot `idx` changes (by +1, same permission id), every other field of
    /// that slot and every other slot — and the `slots` map itself — are
    /// unchanged, and the old count is in the non-overflowing shared range.
    pub proof fn clone_item_preserves_invariants(
        self,
        old_regions: MetaRegionOwners,
        new_regions: MetaRegionOwners,
        idx: usize,
    )
        requires
            self.inv(),
            self.relate_region(old_regions),
            old_regions.inv(),
            self.cur_entry_owner().is_frame(),
            idx == frame_to_index(self.cur_entry_owner().frame.unwrap().mapped_pa),
            old_regions.slot_owners.contains_key(idx),
            new_regions.slot_owners.contains_key(idx),
            // rc at idx is incremented by 1
            new_regions.slot_owners[idx].inner_perms.ref_count.value() ==
                old_regions.slot_owners[idx].inner_perms.ref_count.value() + 1,
            // All other inner_perms fields at idx are identical (same tracked object)
            new_regions.slot_owners[idx].inner_perms.ref_count.id() ==
                old_regions.slot_owners[idx].inner_perms.ref_count.id(),
            new_regions.slot_owners[idx].inner_perms.storage ==
                old_regions.slot_owners[idx].inner_perms.storage,
            new_regions.slot_owners[idx].inner_perms.vtable_ptr ==
                old_regions.slot_owners[idx].inner_perms.vtable_ptr,
            new_regions.slot_owners[idx].inner_perms.in_list ==
                old_regions.slot_owners[idx].inner_perms.in_list,
            // Other MetaSlotOwner fields at idx unchanged
            new_regions.slot_owners[idx].path_if_in_pt ==
                old_regions.slot_owners[idx].path_if_in_pt,
            new_regions.slot_owners[idx].self_addr == old_regions.slot_owners[idx].self_addr,
            new_regions.slot_owners[idx].raw_count == old_regions.slot_owners[idx].raw_count,
            new_regions.slot_owners[idx].usage == old_regions.slot_owners[idx].usage,
            // All other slot_owners unchanged
            new_regions.slot_owners.dom() == old_regions.slot_owners.dom(),
            forall |i: usize| #![trigger new_regions.slot_owners[i]]
                i != idx && old_regions.slot_owners.contains_key(i) ==>
                new_regions.slot_owners[i] == old_regions.slot_owners[i],
            // slots map unchanged
            new_regions.slots == old_regions.slots,
            // rc overflow guard: old rc is a normal shared count; rc+1 stays below REF_COUNT_MAX.
            0 < old_regions.slot_owners[idx].inner_perms.ref_count.value(),
            old_regions.slot_owners[idx].inner_perms.ref_count.value() + 1 < REF_COUNT_MAX,
        ensures
            new_regions.inv(),
            self.relate_region(new_regions),
    {
        self.cont_entries_relate_region(old_regions);
        assert(new_regions.slot_owners[idx].inv());
        // Re-establish the region invariant slot-by-slot: every slot other
        // than idx is untouched, and idx's updated owner was shown valid above.
        assert(new_regions.inv()) by {
            assert forall |i: usize| #[trigger] new_regions.slots.contains_key(i) implies {
                &&& new_regions.slot_owners.contains_key(i)
                &&& new_regions.slot_owners[i].inv()
                &&& new_regions.slots[i].is_init()
                &&& new_regions.slots[i].value().wf(new_regions.slot_owners[i])
                &&& new_regions.slot_owners[i].self_addr == new_regions.slots[i].addr()
            } by { assert(old_regions.slots.contains_key(i)); };
            assert forall |i: usize| #[trigger] new_regions.slot_owners.contains_key(i) implies
                new_regions.slot_owners[i].inv() by {};
        };
        // `relate_region` is insensitive to the ref-count bump.
        self.relate_region_slot_owners_rc_increment(old_regions, new_regions, idx);
    }
1271
    /// If the current entry is a page table node, the cursor must be at level >= 2.
    ///
    /// Proof: the current child subtree has `is_node()`, so by the ghost-tree `la_inv`
    /// (`is_node() ==> tree_level < INC_LEVELS - 1`), its tree level satisfies
    /// `INC_LEVELS - self.level < INC_LEVELS - 1`, i.e., `self.level > 1`.
    pub proof fn cur_entry_node_implies_level_gt_1(self)
        requires
            self.inv(),
            self.cur_entry_owner().is_node(),
        ensures
            self.level > 1,
    {
        self.cur_subtree_inv();
        let cont = self.continuations[self.level - 1];
        let child = self.cur_subtree();
        assert(child.level < INC_LEVELS - 1);
        // Exhaustive case split on the concrete level so the solver can
        // evaluate `cont.level()` definitionally at each value.
        assert(cont.level() == self.level) by {
            if self.level == 1 {} else if self.level == 2 {} else if self.level == 3 {} else {}
        };
    }
1292
    /// A frame entry at the cursor's current level that doesn't fit the aligned range
    /// `[cur_va, end)` must be at level > 1.
    /// Justification: At level 1, `page_size(1) == BASE_PAGE_SIZE`. Since the cursor VA
    /// and `end` are BASE_PAGE_SIZE-aligned and `cur_va < end`, we have
    /// `cur_va + page_size(1) <= end`, so a level-1 frame always fits. Therefore
    /// `!cur_entry_fits_range` implies `level > 1`.
    ///
    /// TODO(verification): the proof body is currently `admit()`ed.
    pub proof fn frame_not_fits_implies_level_gt_1(
        self,
        cur_entry_fits_range: bool,
        cur_va: Vaddr,
        end: Vaddr,
    )
        requires
            self.inv(),
            self.cur_entry_owner().is_frame(),
            !cur_entry_fits_range,
            cur_va < end,
            // Definition: cur_entry_fits_range iff cur_va is at start of entry AND entry end <= end.
            cur_entry_fits_range == (
                cur_va == self.cur_va_range().start.to_vaddr()
                && self.cur_va_range().end.to_vaddr() <= end),
            // cur_va and end are PAGE_SIZE-aligned
            cur_va as nat % PAGE_SIZE as nat == 0,
            end as nat % PAGE_SIZE as nat == 0,
        ensures
            self.level > 1
    { admit() }
1320
    /// When the abstract view has a present mapping at `cur_va`, that
    /// `query_mapping` originates from the current subtree's `view_rec`.
    /// (The argument: `query_mapping` is the chosen element of the mappings
    /// covering `cur_va`, and `mapping_covering_cur_va_from_cur_subtree`
    /// localises any such mapping to the current subtree.)
    proof fn query_mapping_from_subtree(self, qm: Mapping)
        requires
            self.inv(),
            self@.inv(),
            self@.present(),
            qm == self@.query_mapping(),
        ensures
            PageTableOwner(self.cur_subtree()).view_rec(self.cur_subtree().value.path).contains(qm),
    {
        // `query_mapping` is `choose`n from the finite filter of mappings
        // covering `cur_va`; establish finiteness and membership first.
        let f = self@.mappings.filter(|m2: Mapping| m2.va_range.start <= self@.cur_va < m2.va_range.end);
        axiom_set_intersect_finite::<Mapping>(
            self@.mappings, Set::new(|m2: Mapping| m2.va_range.start <= self@.cur_va < m2.va_range.end));
        axiom_set_choose_len(f);
        self.mapping_covering_cur_va_from_cur_subtree(qm);
    }
1340
    /// When the current entry is a PT node at level `self.level`, any mapping at `cur_va`
    /// has `page_size <= page_size(self.level - 1)`.  Therefore `split_while_huge` at
    /// `page_size(self.level - 1)` does not split anything and is a no-op on the
    /// abstract view.
    pub proof fn split_while_huge_node_noop(self)
        requires
            self.inv(),
            self.cur_entry_owner().is_node(),
            self.level > 1,
        ensures
            self@.split_while_huge(page_size((self.level - 1) as PagingLevel)) == self@
    {
        self.view_preserves_inv();
        if self@.present() {
            self.cur_subtree_inv();
            let subtree = self.cur_subtree();
            let path = subtree.value.path;
            let qm = self@.query_mapping();
            // The present mapping comes from the current (node) subtree, whose
            // mappings are bounded below the node's own page size.
            self.query_mapping_from_subtree(qm);
            let cont = self.continuations[self.level - 1];
            cont.path().push_tail_property_len(cont.idx as usize);
            PageTableOwner(subtree).view_rec_node_page_size_bound(path, qm);
        }
    }
1361
    /// When the current entry is absent, there is no mapping at `cur_va` in the abstract view,
    /// so `split_while_huge` finds nothing to split and is a no-op for any target size.
    pub proof fn split_while_huge_absent_noop(self, size: usize)
        requires
            self.inv(),
            self.cur_entry_owner().is_absent(),
        ensures
            self@.split_while_huge(size) == self@,
    {
        self.view_preserves_inv();
        // Absent entry ==> view is not present at cur_va, so there is no
        // mapping for `split_while_huge` to act on.
        self.cur_entry_absent_not_present();
    }
1374
    /// `split_while_huge` at the page size of the cursor's own level is a
    /// no-op on the abstract view: any present mapping at `cur_va` comes from
    /// the current subtree, whose mappings have `page_size` bounded by
    /// `page_size(self.level)` (per `view_rec_page_size_bound`).
    pub proof fn split_while_huge_at_level_noop(self)
        requires
            self.inv(),
        ensures
            self@.split_while_huge(page_size(self.level as PagingLevel)) == self@
    {
        self.view_preserves_inv();
        if self@.present() {
            self.cur_subtree_inv();
            let subtree = self.cur_subtree();
            let path = subtree.value.path;
            let qm = self@.query_mapping();
            self.query_mapping_from_subtree(qm);
            let cont = self.continuations[self.level - 1];
            cont.path().push_tail_property_len(cont.idx as usize);
            PageTableOwner(subtree).view_rec_page_size_bound(path, qm);
        }
    }
1393
    /// A new frame subtree at the current position has mappings equal to the singleton
    /// mapping covering the current slot range.
    ///
    /// The core obligation is showing that the new subtree's path denotes the
    /// virtual address `align_down(cur_va, page_size(level))`, i.e. the start of
    /// the current slot.
    pub proof fn new_child_mappings_eq_target(
        self,
        new_subtree: OwnerSubtree<C>,
        pa: Paddr,
        level: PagingLevel,
        prop: PageProperty,
    )
        requires
            self.inv(),
            level == self.level,
            new_subtree.inv(),
            new_subtree.value.is_frame(),
            new_subtree.value.path ==
                self.continuations[self.level as int - 1].path()
                    .push_tail(self.continuations[self.level as int - 1].idx as usize),
            new_subtree.value.frame.unwrap().mapped_pa == pa,
            new_subtree.value.frame.unwrap().prop == prop,
        ensures
            PageTableOwner(new_subtree)@.mappings == set![Mapping {
                va_range: self@.cur_slot_range(page_size(level)),
                pa_range: pa..(pa + page_size(level)) as usize,
                page_size: page_size(level),
                property: prop,
            }]
    {
        let path = new_subtree.value.path;
        let ps = page_size(level);
        let pt_level = INC_LEVELS - path.len();
        let cont = self.continuations[self.level as int - 1];

        // Path length determines the subtree's page-table level; show it matches
        // the cursor level by a concrete case split (one case per level).
        cont.path().push_tail_property_len(cont.idx as usize);
        assert(cont.level() == self.level) by {
            if self.level == 1 {} else if self.level == 2 {} else if self.level == 3 {} else {}
        };
        assert(pt_level == self.level);

        // Reuse cur_va_in_subtree_range's vaddr equality + to_path_vaddr_concrete.
        self.cur_va_in_subtree_range();
        assert(vaddr(path) == nat_align_down(self@.cur_va as nat, ps as nat) as Vaddr) by {
            self.va.to_path_vaddr_concrete(self.level as int - 1);
            let va_path = self.va.to_path(self.level as int - 1);
            self.va.to_path_len(self.level as int - 1);
            self.va.to_path_inv(self.level as int - 1);
            self.cur_subtree_inv();
            // Show the new subtree's path agrees index-by-index with the path
            // derived from `self.va`. Each level's continuation contributes one
            // component, exposed via push_tail_property_index; the case split
            // unrolls exactly the continuations above the current level.
            assert forall|i: int| 0 <= i < path.len() implies path.index(i) == va_path.index(i) by {
                self.va.to_path_index(self.level as int - 1, i);
                if self.level == 4 {
                    cont.path().push_tail_property_index(cont.idx as usize);
                } else if self.level == 3 {
                    cont.path().push_tail_property_index(cont.idx as usize);
                    self.continuations[3].path().push_tail_property_index(self.continuations[3].idx as usize);
                } else if self.level == 2 {
                    cont.path().push_tail_property_index(cont.idx as usize);
                    self.continuations[2].path().push_tail_property_index(self.continuations[2].idx as usize);
                    self.continuations[3].path().push_tail_property_index(self.continuations[3].idx as usize);
                } else {
                    cont.path().push_tail_property_index(cont.idx as usize);
                    self.continuations[1].path().push_tail_property_index(self.continuations[1].idx as usize);
                    self.continuations[2].path().push_tail_property_index(self.continuations[2].idx as usize);
                    self.continuations[3].path().push_tail_property_index(self.continuations[3].idx as usize);
                }
            };
            // Index-wise equality lifts to equality of the denoted vaddrs.
            AbstractVaddr::rec_vaddr_eq_if_indices_eq(path, va_path, 0);
        };
    }
1461
    /// After alloc_if_none (absent→node) + restore, the cursor invariant holds.
    ///
    /// `owner0` is the cursor owner before the operation; `self` is the owner
    /// afterwards. The preconditions pin down exactly what the operation may
    /// change: only the bottom continuation's children, with all bookkeeping
    /// fields (level, va, path, idx, guard address, domain) preserved.
    pub proof fn map_branch_none_inv_holds(self, owner0: Self)
        requires
            owner0.inv(),
            self.level == owner0.level,
            self.va == owner0.va,
            self.guard_level == owner0.guard_level,
            self.prefix == owner0.prefix,
            self.popped_too_high == owner0.popped_too_high,
            // Higher-level continuations unchanged
            forall|i: int| self.level <= i < NR_LEVELS ==>
                #[trigger] self.continuations[i] == owner0.continuations[i],
            // Bottom continuation is well-formed
            self.continuations[self.level - 1].inv(),
            self.continuations[self.level - 1].all_some(),
            self.continuations[self.level - 1].idx == owner0.continuations[owner0.level - 1].idx,
            self.continuations[self.level - 1].entry_own.parent_level
                == owner0.continuations[owner0.level - 1].entry_own.parent_level,
            // Guard address preserved (from parent_perms_preserved).
            self.continuations[self.level - 1].guard_perm.value().inner.inner@.ptr.addr()
                == owner0.continuations[owner0.level - 1].guard_perm.value().inner.inner@.ptr.addr(),
            self.continuations[self.level - 1].path()
                == owner0.continuations[owner0.level - 1].path(),
            self.va.index[self.level - 1] == self.continuations[self.level - 1].idx,
            // Domain preserved: same keys as owner0.
            self.continuations.dom() =~= owner0.continuations.dom(),
        ensures
            self.inv()
    {
        let L = self.level as int;
        // The empty per-level case splits below give the solver concrete values
        // of L so it can discharge the path-length / map-domain arithmetic.
        assert(self.continuations[L - 1].level() == self.level) by {
            assert(self.continuations[L - 1].path() == owner0.continuations[L - 1].path());
            if L == 1 {} else if L == 2 {} else if L == 3 {} else {}
        };
        assert(self.continuations.contains_key(L - 1)) by {
            if L == 1 {} else if L == 2 {} else if L == 3 {} else {}
        };
    }
1500
    /// After alloc_if_none (absent→node), `path_tracked_pred` transfers via `map_children_lift`.
    ///
    /// The bottom continuation already satisfies the predicate for the *new*
    /// regions (precondition); every higher continuation is unchanged from
    /// `owner0`, so its predicate for the *old* regions is lifted to the new
    /// regions via `path_tracked_pred_preserved`.
    pub proof fn map_branch_none_path_tracked_holds(
        self,
        owner0: Self,
        regions: MetaRegionOwners,
        old_regions: MetaRegionOwners,
    )
        requires
            owner0.relate_region(old_regions),
            self.inv(),
            self.level == owner0.level,
            forall|i: int| self.level <= i < NR_LEVELS ==>
                #[trigger] self.continuations[i] == owner0.continuations[i],
            Entry::<C>::path_tracked_pred_preserved(old_regions, regions),
            // Bottom continuation's children satisfy ptp(regions).
            self.continuations[self.level - 1].map_children(
                PageTableOwner::<C>::path_tracked_pred(regions)),
        ensures
            self.map_full_tree(PageTableOwner::<C>::path_tracked_pred(regions))
    {
        let ptp_old = PageTableOwner::<C>::path_tracked_pred(old_regions);
        let ptp_new = PageTableOwner::<C>::path_tracked_pred(regions);
        assert forall|i: int|
            #![trigger self.continuations[i]]
            self.level - 1 <= i < NR_LEVELS implies
                self.continuations[i].map_children(ptp_new)
        by {
            if i >= self.level as int {
                // Unchanged continuation: lift old-regions predicate to new regions.
                let cont = owner0.continuations[i];
                assert(cont.map_children(ptp_old));
                cont.map_children_lift(ptp_old, ptp_new);
            }
            // i == self.level - 1: holds directly from the precondition.
        };
    }
1535
    /// After alloc_if_none (absent→node), `view_mappings` is unchanged (both contribute zero mappings).
    ///
    /// The child at `idx` changes from absent (empty `view_rec` by
    /// `view_rec_absent_empty`) to an empty node (empty `view_rec` by
    /// precondition); all other children are untouched, so the union of
    /// children's mappings is the same set.
    pub proof fn map_branch_none_no_new_mappings(self, owner0: Self)
        requires
            owner0.inv(),
            self.inv(),
            self.level == owner0.level,
            self.va == owner0.va,
            forall|i: int| self.level <= i < NR_LEVELS ==>
                #[trigger] self.continuations[i] == owner0.continuations[i],
            // child at idx changed from absent to empty node
            owner0.continuations[owner0.level - 1].children[
                owner0.continuations[owner0.level - 1].idx as int] is Some,
            owner0.continuations[owner0.level - 1].children[
                owner0.continuations[owner0.level - 1].idx as int].unwrap().value.is_absent(),
            self.continuations[self.level - 1].children[
                self.continuations[self.level - 1].idx as int] is Some,
            self.continuations[self.level - 1].children[
                self.continuations[self.level - 1].idx as int].unwrap().value.is_node(),
            // Non-idx children and path preserved
            self.continuations[self.level - 1].path()
                == owner0.continuations[owner0.level - 1].path(),
            forall|j: int| 0 <= j < NR_ENTRIES
                && j != owner0.continuations[owner0.level - 1].idx as int ==>
                #[trigger] self.continuations[self.level - 1].children[j]
                    == owner0.continuations[owner0.level - 1].children[j],
            // The new node's subtree has empty view_rec (from alloc_if_none postcondition)
            PageTableOwner(self.continuations[self.level - 1].children[
                self.continuations[self.level - 1].idx as int].unwrap())
                .view_rec(self.continuations[self.level - 1].path()
                    .push_tail(self.continuations[self.level - 1].idx as usize))
                =~= Set::<Mapping>::empty(),
        ensures
            self.view_mappings() =~= owner0.view_mappings()
    {
        let L = self.level as int;
        let cont = self.continuations[L - 1];
        let cont0 = owner0.continuations[L - 1];
        let idx = cont0.idx as int;

        // Set equality in both directions: any mapping comes from some child j
        // (witnessed by `choose`); if j != idx the child is identical on both
        // sides, and if j == idx both sides contribute the empty set.
        assert(cont.view_mappings() =~= cont0.view_mappings()) by {
            cont0.inv_children_unroll(idx);
            // Old child at idx is absent, hence contributes no mappings.
            PageTableOwner(cont0.children[idx].unwrap())
                .view_rec_absent_empty(cont0.path().push_tail(idx as usize));
            assert forall |m: Mapping| cont.view_mappings().contains(m)
                implies cont0.view_mappings().contains(m) by {
                let j = choose|j:int| 0 <= j < cont.children.len()
                    && #[trigger] cont.children[j] is Some
                    && PageTableOwner(cont.children[j].unwrap())
                        .view_rec(cont.path().push_tail(j as usize)).contains(m);
                if j != idx { assert(cont.children[j] == cont0.children[j]); }
            };
            assert forall |m: Mapping| cont0.view_mappings().contains(m)
                implies cont.view_mappings().contains(m) by {
                let j = choose|j:int| 0 <= j < cont0.children.len()
                    && #[trigger] cont0.children[j] is Some
                    && PageTableOwner(cont0.children[j].unwrap())
                        .view_rec(cont0.path().push_tail(j as usize)).contains(m);
                if j != idx { assert(cont0.children[j] == cont.children[j]); }
            };
        };
    }
1597
    /// After `map_branch_none` (alloc_if_none + push_level), the current entry is absent.
    ///
    /// Proof: `alloc_if_none` creates an empty PT node where all children are absent
    /// (`allocated_empty_node_owner` line 172). `push_level` enters one of these children,
    /// so `cur_entry_owner().is_absent()` holds.
    pub proof fn map_branch_none_cur_entry_absent(self)
        requires
            self.inv(),
            // All children of the current continuation are absent (from the empty node)
            forall|i: int| 0 <= i < NR_ENTRIES ==>
                #[trigger] self.continuations[self.level - 1].children[i] is Some &&
                self.continuations[self.level - 1].children[i].unwrap().value.is_absent(),
        ensures
            self.cur_entry_owner().is_absent(),
    {
        // cur_entry_owner() = continuations[level-1].children[index()].unwrap().value
        // From inv(): 0 <= index() < NR_ENTRIES, so precondition gives is_absent().
        // No explicit steps needed: the solver discharges this by unfolding
        // cur_entry_owner and instantiating the precondition at index().
    }
1616
    /// After `map_branch_none` splits a huge frame at level `level_before_frame` and descends,
    /// the cursor view equals `owner0@.split_while_huge(page_size(level_before_frame - 1))`.
    ///
    /// Chain:
    ///   owner@ = owner_before_frame@.split_if_mapped_huge_spec(page_size(level_before_frame - 1))
    ///          = owner0@.split_while_huge(page_size(level_before_frame)).split_if_mapped_huge_spec(...)
    ///          = owner0@.split_while_huge(page_size(level_before_frame - 1))
    /// The last equality uses the fact that split_while_huge(L) on a frame of size page_size(L)
    /// takes exactly one split step to page_size(L-1), matching split_if_mapped_huge_spec.
    ///
    /// NOTE(review): the proof body is currently admitted; the chain argument
    /// above is documented but not yet mechanized.
    pub proof fn map_branch_frame_split_while_huge(
        self,
        owner0: Self,
        owner_before_frame: Self,
        level_before_frame: int,
    )
        requires
            self.inv(),
            owner0.inv(),
            owner_before_frame.inv(),
            1 <= level_before_frame - 1,
            level_before_frame <= NR_LEVELS,
            self.level == (level_before_frame - 1) as u8,
            owner_before_frame@ == owner0@.split_while_huge(
                page_size(level_before_frame as PagingLevel)),
            self@ == owner_before_frame@.split_if_mapped_huge_spec(
                page_size((level_before_frame - 1) as PagingLevel)),
        ensures
            self@ == owner0@.split_while_huge(page_size(self.level as PagingLevel))
    { admit() }
1646
    /// After split_if_mapped_huge + push_level, the mappings equal
    /// `old_view.split_while_huge(page_size(current_level))`.
    ///
    /// Only the "exactly one split step" case (`m.page_size / NR_ENTRIES == ps`)
    /// is currently proved, via `split_while_huge_one_step`; the multi-step and
    /// already-small cases are admitted.
    pub proof fn find_next_split_push_equals_split_while_huge(
        self,
        old_view: CursorView<C>,
    )
        requires
            self.inv(),
            self.cur_entry_owner().is_frame(),
            self@.cur_va == old_view.cur_va,
            old_view.present(),
            self@.mappings =~= old_view.split_if_mapped_huge_spec(
                page_size(self.level as PagingLevel)).mappings,
        ensures
            self@.mappings =~= old_view.split_while_huge(
                page_size(self.level as PagingLevel)).mappings,
    {
        let ps = page_size(self.level as PagingLevel);
        let m = old_view.query_mapping();
        if m.page_size > ps && m.page_size / NR_ENTRIES == ps {
            // The mapping is exactly one level larger: split_while_huge takes a
            // single step, which coincides with split_if_mapped_huge_spec.
            old_view.split_while_huge_one_step(ps);
        } else {
            admit() // edge cases: multi-step split or page_size <= ps
        }
    }
1672
    /// `split_while_huge` gives the same mappings for two `cur_va` values
    /// when no mapping starts between them and the `!present` case is a no-op.
    ///
    /// Only the `!v1.present()` and `cur_va`-equal cases are currently proved;
    /// the `v1.present()` case is admitted.
    pub proof fn split_while_huge_cur_va_independent(
        v1: CursorView<C>,
        v2: CursorView<C>,
        size: usize,
    )
        requires
            v1.inv(),
            v2.inv(),
            v1.mappings =~= v2.mappings,
            v1.cur_va <= v2.cur_va,
            // No mapping starts in [v1.cur_va, v2.cur_va).
            v1.mappings.filter(|m: Mapping|
                v1.cur_va <= m.va_range.start && m.va_range.start < v2.cur_va)
                =~= Set::<Mapping>::empty(),
            // When v1 has no mapping at cur_va, any mapping at v2.cur_va is
            // already small enough that split_while_huge is a no-op on it too.
            // (At the call site this follows from: split_while_huge(v1) was a
            // no-op, so find_next found the mapping without splitting, meaning
            // its page_size <= size.)
            !v1.present() && v2.present() ==> v2.query_mapping().page_size <= size,
        ensures
            v1.split_while_huge(size).mappings =~= v2.split_while_huge(size).mappings,
    {
        if v1.cur_va == v2.cur_va {
            // Same cur_va and extensionally equal mappings: the views coincide.
            assert(v1 =~= v2);
            return;
        }
        if v1.present() {
            // Both VAs are covered by the same mapping (or v2 is not present).
            // In either case split_while_huge proceeds identically.
            admit()
        }
        // !v1.present(): both split_while_huge calls are no-ops.
    }
1709
    /// The virtual-address range covered by the cursor's lock: `prefix` aligned
    /// down (start) and up (end) to the guard level's node boundary.
    pub open spec fn locked_range(self) -> Range<Vaddr> {
        let start = self.prefix.align_down(self.guard_level as int).to_vaddr();
        let end = self.prefix.align_up(self.guard_level as int).to_vaddr();
        Range { start, end }
    }
1715
    /// Whether the cursor's current virtual address lies inside `locked_range()`.
    pub open spec fn in_locked_range(self) -> bool {
        self.locked_range().start <= self.va.to_vaddr() < self.locked_range().end
    }
1719
    /// Whether the cursor's current virtual address is at or past the end of
    /// `locked_range()` (i.e. the cursor has walked off the locked region).
    pub open spec fn above_locked_range(self) -> bool {
        self.va.to_vaddr() >= self.locked_range().end
    }
1723
    /// If `va` agrees with `prefix` on every index at or above `guard_level`,
    /// then `va` lies inside the locked range.
    ///
    /// NOTE(review): proof body is currently admitted.
    pub proof fn prefix_in_locked_range(self)
        requires
            forall|i:int| i >= self.guard_level ==> self.va.index[i] == self.prefix.index[i],
        ensures
            self.in_locked_range(),
    { admit() }
1730
    /// Inside the locked range (and not popped too high), the cursor's level is
    /// strictly below NR_LEVELS. Follows from `level < guard_level <= NR_LEVELS`.
    pub proof fn in_locked_range_level_lt_nr_levels(self)
        requires self.inv(), self.in_locked_range(), !self.popped_too_high,
        ensures self.level < NR_LEVELS,
    {
        self.in_locked_range_level_lt_guard_level();
    }
1737
    /// Inside the locked range (and not popped too high), the cursor's level is
    /// strictly below the guard level.
    pub proof fn in_locked_range_level_lt_guard_level(self)
        requires self.inv(), self.in_locked_range(), !self.popped_too_high,
        ensures self.level < self.guard_level,
    {
        // in_locked_range and above_locked_range are mutually exclusive; the
        // rest follows from the invariant.
        assert(self.in_locked_range() ==> !self.above_locked_range());
    }
1744
    /// The node at `level+1` containing `va` fits within the locked range.
    ///
    /// NOTE(review): proof body is currently admitted.
    pub proof fn node_within_locked_range(self, level: PagingLevel)
        requires
            self.in_locked_range(),
            1 <= level < self.guard_level,
            self.va.inv(),
        ensures
            self.locked_range().start <= nat_align_down(self.va.to_vaddr() as nat, page_size((level + 1) as PagingLevel) as nat) as usize,
            nat_align_down(self.va.to_vaddr() as nat, page_size((level + 1) as PagingLevel) as nat) as usize + page_size((level + 1) as PagingLevel) <= self.locked_range().end,
    {
        admit()
    }
1757
    /// Both ends of the locked range are PAGE_SIZE-aligned: they are aligned to
    /// `page_size(guard_level)`, which PAGE_SIZE divides
    /// (via `lemma_page_size_divides`).
    ///
    /// NOTE(review): the final nat→Vaddr no-overflow step is admitted (needs a
    /// bound on `prefix.to_vaddr()`), as is the defensive `guard_level == 0` case.
    pub proof fn locked_range_page_aligned(self)
        requires
            self.inv(),
        ensures
            self.locked_range().end % PAGE_SIZE == 0,
            self.locked_range().start % PAGE_SIZE == 0,
    {
        let gl = self.guard_level;
        if gl == 0 {
            // guard_level == 0 shouldn't happen in practice, but handle defensively.
            admit();
            return;
        }
        let pv = self.prefix.to_vaddr() as nat;
        let ps = page_size(gl as PagingLevel) as nat;
        // page_size(gl) is a positive multiple of PAGE_SIZE (= page_size(1)).
        lemma_page_size_ge_page_size(gl as PagingLevel);
        lemma_page_size_divides(1u8, gl as PagingLevel);
        lemma_page_size_spec_level1();
        // align_down/up results are multiples of ps ...
        lemma_nat_align_down_sound(pv, ps);
        lemma_nat_align_up_sound(pv, ps);
        let start_va = nat_align_down(pv, ps);
        let end_va = nat_align_up(pv, ps);
        // ... hence multiples of PAGE_SIZE, by mod-of-mod reasoning.
        vstd::arithmetic::div_mod::lemma_mod_mod(start_va as int, PAGE_SIZE as int, ps as int / PAGE_SIZE as int);
        vstd::arithmetic::div_mod::lemma_mod_mod(end_va as int, PAGE_SIZE as int, ps as int / PAGE_SIZE as int);
        // Relate the abstract align_down/up on `prefix` to the nat-level ones.
        self.prefix.align_down_concrete(gl as int);
        self.prefix.align_up_concrete(gl as int);
        AbstractVaddr::from_vaddr_to_vaddr_roundtrip(start_va as Vaddr);
        AbstractVaddr::from_vaddr_to_vaddr_roundtrip(end_va as Vaddr);
        // Admitting: nat→Vaddr cast doesn't overflow (requires prefix.to_vaddr() < MAX bound).
        admit();
    }
1789
    /// The subtree rooted at the cursor's current entry satisfies its invariant.
    /// Follows by unrolling the bottom continuation's children invariant at the
    /// current index.
    pub proof fn cur_subtree_inv(self)
        requires
            self.inv()
        ensures
            self.cur_subtree().inv()
    {
        let cont = self.continuations[self.level - 1];
        cont.inv_children_unroll(cont.idx as int)
    }
1799
    /// If the current entry is absent, `!self@.present()`.
    ///
    /// An absent entry's subtree has an empty `view_rec`, and any mapping
    /// covering `cur_va` would have to come from that subtree
    /// (`mapping_covering_cur_va_from_cur_subtree`) — contradiction. Hence the
    /// filter defining `present()` is empty.
    pub proof fn cur_entry_absent_not_present(self)
        requires self.inv(), self.cur_entry_owner().is_absent(),
        ensures !self@.present(),
    {
        self.cur_subtree_inv();
        let cur_va = self.cur_va();
        let cur_subtree = self.cur_subtree();
        let cur_path = cur_subtree.value.path;
        // Absent entry ==> its subtree contributes no mappings.
        PageTableOwner(cur_subtree).view_rec_absent_empty(cur_path);

        // No mapping in the full view can cover cur_va: it would have to live
        // in the (empty) current subtree.
        assert forall |m: Mapping| self.view_mappings().contains(m) implies
            !(m.va_range.start <= cur_va < m.va_range.end) by {
            if m.va_range.start <= cur_va < m.va_range.end {
                self.mapping_covering_cur_va_from_cur_subtree(m);
            }
        };

        // Therefore the present()-defining filter is the empty set.
        let filtered = self@.mappings.filter(
            |m: Mapping| m.va_range.start <= self@.cur_va < m.va_range.end);
        assert(filtered =~= set![]) by {
            assert forall |m: Mapping| !filtered.contains(m) by {
                if self@.mappings.contains(m) && m.va_range.start <= self@.cur_va < m.va_range.end {
                    assert(self.view_mappings().contains(m));
                }
            };
        };
    }
1828
    /// Generalises `cur_entry_absent_not_present` to any empty subtree.
    ///
    /// Same argument, but takes the emptiness of the current subtree's
    /// `view_rec` as a precondition instead of deriving it from absence.
    pub proof fn cur_subtree_empty_not_present(self)
        requires
            self.inv(),
            PageTableOwner(self.cur_subtree()).view_rec(self.cur_subtree().value.path) =~= set![],
        ensures !self@.present(),
    {
        let cur_va = self.cur_va();

        // No mapping in the full view can cover cur_va: it would have to live
        // in the (empty, by precondition) current subtree.
        assert forall |m: Mapping| self.view_mappings().contains(m) implies
            !(m.va_range.start <= cur_va < m.va_range.end) by {
            if m.va_range.start <= cur_va < m.va_range.end {
                self.mapping_covering_cur_va_from_cur_subtree(m);
            }
        };

        // Therefore the present()-defining filter is the empty set.
        let filtered = self@.mappings.filter(
            |m: Mapping| m.va_range.start <= self@.cur_va < m.va_range.end);
        assert(filtered =~= set![]) by {
            assert forall |m: Mapping| !filtered.contains(m) by {
                if self@.mappings.contains(m) && m.va_range.start <= self@.cur_va < m.va_range.end {
                    assert(self.view_mappings().contains(m));
                }
            };
        };
        assert(filtered.len() == 0);
    }
1856
    /// If the current entry is a mapped frame, the abstract view is `present()`
    /// at `cur_va` and `query` returns exactly that frame's pa/size/prop.
    ///
    /// The frame's subtree contributes a singleton mapping `m`; membership of
    /// `m` in the present()-defining filter makes the filter non-empty.
    pub proof fn cur_entry_frame_present(self)
        requires
            self.inv(),
            self.cur_entry_owner().is_frame(),
        ensures
            self@.present(),
            self@.query(self.cur_entry_owner().frame.unwrap().mapped_pa,
                self.cur_entry_owner().frame.unwrap().size,
                self.cur_entry_owner().frame.unwrap().prop),
    {
        self.cur_subtree_inv();
        self.cur_va_in_subtree_range();
        self.view_preserves_inv();
        let subtree = self.cur_subtree();
        let path = subtree.value.path;
        let frame = self.cur_entry_owner().frame.unwrap();
        let pt_level = INC_LEVELS - path.len();
        let cont = self.continuations[self.level - 1];

        // Path length determines the subtree's level; concrete case split.
        cont.path().push_tail_property_len(cont.idx as usize);
        assert(cont.level() == self.level) by {
            if self.level == 1 {} else if self.level == 2 {} else if self.level == 3 {} else {}
        };
        assert(pt_level == self.level);

        // The single mapping contributed by the frame subtree.
        let m = Mapping {
            va_range: Range { start: vaddr(path), end: (vaddr(path) + page_size(pt_level as PagingLevel)) as Vaddr },
            pa_range: Range { start: frame.mapped_pa, end: (frame.mapped_pa + page_size(pt_level as PagingLevel)) as Paddr },
            page_size: page_size(pt_level as PagingLevel),
            property: frame.prop,
        };
        assert(PageTableOwner(subtree).view_rec(path) =~= set![m]);
        assert(self.view_mappings().contains(m));
        assert(m.inv());

        // m covers cur_va, so the filter is non-empty (len >= 1 via set axioms),
        // establishing present() and the query result.
        let filtered = self@.mappings.filter(|m2: Mapping| m2.va_range.start <= self@.cur_va < m2.va_range.end);
        axiom_set_intersect_finite::<Mapping>(
            self@.mappings, Set::new(|m2: Mapping| m2.va_range.start <= self@.cur_va < m2.va_range.end));
        axiom_set_contains_len(filtered, m);
    }
1897
    /// Every entry in the cursor's tracked tree is consistent with `regions`:
    /// each entry individually relates to the region owners, and the whole tree
    /// satisfies `path_tracked_pred` (entry paddrs are tracked at their paths).
    pub open spec fn relate_region(self, regions: MetaRegionOwners) -> bool
    {
        &&& self.map_full_tree(|entry_owner: EntryOwner<C>, path: TreePath<NR_ENTRIES>|
            entry_owner.relate_region(regions))
        &&& self.map_full_tree(PageTableOwner::<C>::path_tracked_pred(regions))
    }
1904
    /// No entry in the cursor's tracked tree shares a meta-slot physical
    /// address with `owner`.
    pub open spec fn not_in_tree(self, owner: EntryOwner<C>) -> bool {
        self.map_full_tree(|owner0: EntryOwner<C>, path: TreePath<NR_ENTRIES>|
            owner0.meta_slot_paddr_neq(owner))
    }
1909
    /// An absent entry cannot collide with anything in the tree: every tree
    /// entry is in scope (`tree_not_in_scope` gives the complement predicate),
    /// and the paddr-inequality predicate follows from that via `map_implies`.
    pub proof fn absent_not_in_tree(self, owner: EntryOwner<C>)
        requires
            self.inv(),
            owner.inv(),
            owner.is_absent(),
        ensures
            self.not_in_tree(owner),
    {
        let g = |e: EntryOwner<C>, p: TreePath<NR_ENTRIES>| e.meta_slot_paddr_neq(owner);
        let nsp = PageTableOwner::<C>::not_in_scope_pred();
        // not_in_scope_pred implies the paddr-inequality predicate g.
        assert(OwnerSubtree::implies(nsp, g)) by {
            assert forall |entry: EntryOwner<C>, path: TreePath<NR_ENTRIES>|
                entry.inv() && !entry.in_scope implies #[trigger] g(entry, path) by {};
        };
        // Push g through every continuation's children.
        assert forall |i: int| #![trigger self.continuations[i]]
            self.level - 1 <= i < NR_LEVELS implies self.continuations[i].map_children(g)
        by {
            let cont = self.continuations[i];
            reveal(CursorContinuation::inv_children);
            assert forall |j: int| 0 <= j < NR_ENTRIES
                && #[trigger] cont.children[j] is Some implies
                cont.children[j].unwrap().tree_predicate_map(cont.path().push_tail(j as usize), g)
            by {
                cont.inv_children_unroll(j);
                // Each child subtree satisfies nsp, hence g.
                PageTableOwner::tree_not_in_scope(cont.children[j].unwrap(), cont.path().push_tail(j as usize));
                cont.children[j].unwrap().map_implies(cont.path().push_tail(j as usize), nsp, g);
            };
        };
    }
1939
    /// If the cursor owner's tree satisfies `relate_region(regions)`, and a new entry's physical
    /// address is not currently tracked in the page table (`path_if_in_pt is None`), then no
    /// existing entry in the tree has the same physical address as the new entry.
    ///
    /// This lemma encapsulates the `map_children_implies` proof for `not_in_tree`, factored out
    /// so it runs in its own Z3 context (avoiding rlimit issues when called from large functions).
    pub proof fn not_in_tree_from_not_mapped(
        self,
        regions: MetaRegionOwners,
        new_entry: EntryOwner<C>,
    )
        requires
            self.inv(),
            self.relate_region(regions),
            new_entry.meta_slot_paddr() is Some,
            regions.slot_owners[
                frame_to_index(new_entry.meta_slot_paddr().unwrap())
            ].path_if_in_pt is None,
        ensures
            self.not_in_tree(new_entry),
    {
        let pa = new_entry.meta_slot_paddr().unwrap();
        let g = |e: EntryOwner<C>, p: TreePath<NR_ENTRIES>| e.meta_slot_paddr_neq(new_entry);
        // path_tracked_pred implies g: a tree entry with the same paddr as
        // new_entry would be tracked (path_if_in_pt Some), contradicting the
        // precondition that new_entry's slot is untracked.
        assert(OwnerSubtree::implies(PageTableOwner::<C>::path_tracked_pred(regions), g)) by {
            assert forall |entry: EntryOwner<C>, path: TreePath<NR_ENTRIES>|
                PageTableOwner::<C>::path_tracked_pred(regions)(entry, path)
                implies #[trigger] g(entry, path) by {
                if entry.meta_slot_paddr() is Some && entry.meta_slot_paddr().unwrap() == pa {
                    assert(false);
                }
            };
        };
        self.map_children_implies(PageTableOwner::<C>::path_tracked_pred(regions), g);
    }
1974
    /// Transfers `relate_region` from `(self, regions0)` to `(other, regions1)`
    /// when the continuations are (extensionally) the same and both component
    /// predicates of `relate_region` weaken from regions0 to regions1.
    pub proof fn relate_region_preserved(self, other: Self, regions0: MetaRegionOwners, regions1: MetaRegionOwners)
        requires
            self.inv(),
            self.relate_region(regions0),
            self.level == other.level,
            self.continuations =~= other.continuations,
            OwnerSubtree::implies(
                PageTableOwner::<C>::relate_region_pred(regions0),
                PageTableOwner::<C>::relate_region_pred(regions1)),
            OwnerSubtree::implies(
                PageTableOwner::<C>::path_tracked_pred(regions0),
                PageTableOwner::<C>::path_tracked_pred(regions1)),
        ensures
            other.relate_region(regions1),
    {
        // e/f: predicates under regions0; h/g: their regions1 counterparts.
        let e = PageTableOwner::<C>::path_tracked_pred(regions0);
        let f = PageTableOwner::relate_region_pred(regions0);
        let g = PageTableOwner::relate_region_pred(regions1);
        let h = PageTableOwner::<C>::path_tracked_pred(regions1);

        assert forall|i: int| #![auto] self.level - 1 <= i < NR_LEVELS implies {
            &&& other.continuations[i].map_children(g)
            &&& other.continuations[i].map_children(h)
        } by {
            let cont = self.continuations[i];
            assert(cont.inv());
            assert(cont.map_children(f));
            assert(cont.map_children(e));
            assert(cont == other.continuations[i]);
            reveal(CursorContinuation::inv_children);
            // Lift f ==> g through each child subtree.
            assert forall |j: int| 0 <= j < NR_ENTRIES && #[trigger] cont.children[j] is Some implies
                cont.children[j].unwrap().tree_predicate_map(cont.path().push_tail(j as usize), g) by {
                    cont.inv_children_unroll(j);
                    cont.children[j].unwrap().map_implies(cont.path().push_tail(j as usize), f, g);
            };
            // Lift e ==> h through each child subtree.
            assert forall |j: int| 0 <= j < NR_ENTRIES && #[trigger] cont.children[j] is Some implies
                cont.children[j].unwrap().tree_predicate_map(cont.path().push_tail(j as usize), h) by {
                    cont.inv_children_unroll(j);
                    cont.children[j].unwrap().map_implies(cont.path().push_tail(j as usize), e, h);
            };
        };
    }
2017
    /// Transfers `relate_region` when `slot_owners` is preserved.
    ///
    /// Shows both component predicates weaken from regions0 to regions1 (the
    /// per-entry case via `relate_region_slot_owners_only`, the path-tracked
    /// case directly since it only inspects slot_owners), then delegates to
    /// `relate_region_preserved` with `other == self`.
    pub proof fn relate_region_slot_owners_preserved(self, regions0: MetaRegionOwners, regions1: MetaRegionOwners)
        requires
            self.inv(),
            self.relate_region(regions0),
            regions0.slot_owners =~= regions1.slot_owners,
        ensures
            self.relate_region(regions1),
    {
        let f = PageTableOwner::<C>::relate_region_pred(regions0);
        let g = PageTableOwner::<C>::relate_region_pred(regions1);
        let e = PageTableOwner::<C>::path_tracked_pred(regions0);
        let h = PageTableOwner::<C>::path_tracked_pred(regions1);
        assert forall |entry: EntryOwner<C>, path: TreePath<NR_ENTRIES>|
            entry.inv() && f(entry, path) implies #[trigger] g(entry, path) by {
                entry.relate_region_slot_owners_only(regions0, regions1);
        };
        assert forall |entry: EntryOwner<C>, path: TreePath<NR_ENTRIES>|
            entry.inv() && e(entry, path) implies #[trigger] h(entry, path) by {};
        self.relate_region_preserved(self, regions0, regions1);
    }
2039
    /// Transfers `relate_region` when only the reference count of slot `idx`
    /// is incremented by one, with every other field of that slot — and every
    /// other slot — unchanged.
    ///
    /// Entries whose meta slot lives at an index other than `idx` see
    /// identical slot owners in both regions; the entry at `idx` transfers via
    /// `relate_region_rc_value_changed`. The tree-wide transfer is then
    /// delegated to `relate_region_preserved`.
    pub proof fn relate_region_slot_owners_rc_increment(
        self, regions0: MetaRegionOwners, regions1: MetaRegionOwners, idx: usize)
        requires
            self.inv(),
            self.relate_region(regions0),
            regions0.inv(),
            regions1.slots == regions0.slots,
            regions1.slot_owners.dom() == regions0.slot_owners.dom(),
            // Ref count at idx goes up by exactly one (same token id).
            regions1.slot_owners[idx].inner_perms.ref_count.value() ==
                regions0.slot_owners[idx].inner_perms.ref_count.value() + 1,
            regions1.slot_owners[idx].inner_perms.ref_count.id() ==
                regions0.slot_owners[idx].inner_perms.ref_count.id(),
            // All other inner_perms fields at idx are preserved.
            regions1.slot_owners[idx].inner_perms.storage ==
                regions0.slot_owners[idx].inner_perms.storage,
            regions1.slot_owners[idx].inner_perms.vtable_ptr ==
                regions0.slot_owners[idx].inner_perms.vtable_ptr,
            regions1.slot_owners[idx].inner_perms.in_list ==
                regions0.slot_owners[idx].inner_perms.in_list,
            // All remaining slot-owner fields at idx are preserved.
            regions1.slot_owners[idx].path_if_in_pt == regions0.slot_owners[idx].path_if_in_pt,
            regions1.slot_owners[idx].self_addr == regions0.slot_owners[idx].self_addr,
            regions1.slot_owners[idx].raw_count == regions0.slot_owners[idx].raw_count,
            regions1.slot_owners[idx].usage == regions0.slot_owners[idx].usage,
            // The new count is a live value, not the UNUSED sentinel.
            regions1.slot_owners[idx].inner_perms.ref_count.value()
                != REF_COUNT_UNUSED,
            // Every other slot is untouched.
            forall |i: usize| #![trigger regions1.slot_owners[i]]
                i != idx && regions0.slot_owners.contains_key(i) ==>
                regions1.slot_owners[i] == regions0.slot_owners[i],
        ensures
            self.relate_region(regions1),
    {
        // Per-entry predicates over the old (f, e) and new (g, h) region maps.
        let f = PageTableOwner::<C>::relate_region_pred(regions0);
        let g = PageTableOwner::<C>::relate_region_pred(regions1);
        let e = PageTableOwner::<C>::path_tracked_pred(regions0);
        let h = PageTableOwner::<C>::path_tracked_pred(regions1);
        // f ==> g: split on whether the entry's meta slot is the changed one.
        assert(OwnerSubtree::implies(f, g)) by {
            assert forall |entry: EntryOwner<C>, path: TreePath<NR_ENTRIES>|
                entry.inv() && f(entry, path) implies #[trigger] g(entry, path) by {
                if entry.meta_slot_paddr() is Some {
                    let eidx = frame_to_index(entry.meta_slot_paddr().unwrap());
                    // eidx != idx: slot unchanged, transfer is automatic.
                    // eidx == idx: only the rc value changed; dedicated lemma.
                    if eidx != idx {} else { entry.relate_region_rc_value_changed(regions0, regions1); }
                }
            };
        };
        // e ==> h: path_if_in_pt is unchanged at every index, so the case
        // split alone lets the solver close both branches.
        assert(OwnerSubtree::implies(e, h)) by {
            assert forall |entry: EntryOwner<C>, path: TreePath<NR_ENTRIES>|
                entry.inv() && e(entry, path) implies #[trigger] h(entry, path) by {
                if entry.meta_slot_paddr() is Some { let eidx = frame_to_index(entry.meta_slot_paddr().unwrap()); if eidx != idx {} }
            };
        };
        self.relate_region_preserved(self, regions0, regions1);
    }
2091
    /// Transfers `relate_region` when `raw_count` changed from 0 to 1 at one index.
    /// Uses `map_implies_and` with `not_in_scope_pred` since tree entries have `!in_scope`.
    ///
    /// Unlike the other transfer lemmas, the pointwise implication here is
    /// conditional: it only holds for entries satisfying `not_in_scope_pred`,
    /// so each child subtree is first shown to satisfy that predicate
    /// (`tree_not_in_scope`) before `map_implies_and` is applied.
    pub proof fn relate_region_borrow_slot(
        self, regions0: MetaRegionOwners, regions1: MetaRegionOwners, changed_idx: usize
    )
        requires
            self.inv(),
            self.relate_region(regions0),
            regions1.inv(),
            regions0.slot_owners[changed_idx].raw_count == 0,
            regions1.slot_owners[changed_idx].raw_count == 1,
            // All other fields at changed_idx preserved
            regions1.slot_owners[changed_idx].inner_perms
                == regions0.slot_owners[changed_idx].inner_perms,
            regions1.slot_owners[changed_idx].self_addr
                == regions0.slot_owners[changed_idx].self_addr,
            regions1.slot_owners[changed_idx].usage
                == regions0.slot_owners[changed_idx].usage,
            regions1.slot_owners[changed_idx].path_if_in_pt
                == regions0.slot_owners[changed_idx].path_if_in_pt,
            // All other slots unchanged
            forall |i: usize| #![trigger regions1.slot_owners[i]]
                i != changed_idx ==> regions0.slot_owners[i] == regions1.slot_owners[i],
            regions0.slot_owners.dom() =~= regions1.slot_owners.dom(),
        ensures
            self.relate_region(regions1),
    {
        // Per-entry predicates over the old (f, e) and new (g, h) region maps,
        // plus the scope restriction nsp used to make the implications hold.
        let f = PageTableOwner::<C>::relate_region_pred(regions0);
        let g = PageTableOwner::<C>::relate_region_pred(regions1);
        let e = PageTableOwner::<C>::path_tracked_pred(regions0);
        let h = PageTableOwner::<C>::path_tracked_pred(regions1);
        let nsp = PageTableOwner::<C>::not_in_scope_pred();

        // implies(f && nsp, g): with !in_scope, nodes at changed_idx have expected_raw_count==1
        // but r0.raw_count==0, so f is false → vacuous. Frames don't check raw_count.
        assert(OwnerSubtree::implies(
            |v: EntryOwner<C>, p: TreePath<NR_ENTRIES>| f(v, p) && nsp(v, p), g)) by {
            assert forall |entry: EntryOwner<C>, path: TreePath<NR_ENTRIES>|
                entry.inv() && f(entry, path) && nsp(entry, path)
                implies #[trigger] g(entry, path) by {
                if entry.meta_slot_paddr() is Some {
                    let eidx = frame_to_index(entry.meta_slot_paddr().unwrap());
                    if eidx != changed_idx {
                        // Untouched slot: regions agree, so f transfers to g directly.
                        assert(regions0.slot_owners[eidx] == regions1.slot_owners[eidx]);
                    }
                }
            };
        };

        // implies(e && nsp, h): path_if_in_pt unchanged.
        assert(OwnerSubtree::implies(
            |v: EntryOwner<C>, p: TreePath<NR_ENTRIES>| e(v, p) && nsp(v, p), h)) by {
            assert forall |entry: EntryOwner<C>, path: TreePath<NR_ENTRIES>|
                entry.inv() && e(entry, path) && nsp(entry, path)
                implies #[trigger] h(entry, path) by {
                if entry.meta_slot_paddr() is Some {
                    let eidx = frame_to_index(entry.meta_slot_paddr().unwrap());
                    if eidx != changed_idx {}
                }
            };
        };

        // Re-establish map_children(g) and map_children(h) for every active
        // continuation level, child by child.
        assert forall |i: int|
            #![trigger self.continuations[i]]
            self.level - 1 <= i < NR_LEVELS implies {
                &&& self.continuations[i].map_children(g)
                &&& self.continuations[i].map_children(h)
            }
        by {
            let cont = self.continuations[i];
            assert(cont.map_children(f));
            assert(cont.map_children(e));
            reveal(CursorContinuation::inv_children);
            // Step 1: every present child subtree satisfies nsp everywhere.
            assert forall |j: int| 0 <= j < NR_ENTRIES
                && #[trigger] cont.children[j] is Some implies
                cont.children[j].unwrap().tree_predicate_map(cont.path().push_tail(j as usize), nsp)
            by {
                cont.inv_children_unroll(j);
                PageTableOwner::tree_not_in_scope(cont.children[j].unwrap(), cont.path().push_tail(j as usize));
            };
            // Step 2: f && nsp ==> g lifts f-maps to g-maps over each subtree.
            assert forall |j: int| 0 <= j < NR_ENTRIES
                && #[trigger] cont.children[j] is Some implies
                cont.children[j].unwrap().tree_predicate_map(cont.path().push_tail(j as usize), g)
            by {
                cont.inv_children_unroll(j);
                cont.children[j].unwrap().map_implies_and(cont.path().push_tail(j as usize), f, nsp, g);
            };
            // Step 3: likewise e && nsp ==> h lifts e-maps to h-maps.
            assert forall |j: int| 0 <= j < NR_ENTRIES
                && #[trigger] cont.children[j] is Some implies
                cont.children[j].unwrap().tree_predicate_map(cont.path().push_tail(j as usize), h)
            by {
                cont.inv_children_unroll(j);
                cont.children[j].unwrap().map_implies_and(cont.path().push_tail(j as usize), e, nsp, h);
            };
        };
    }
2188
    /// Continuation entry_owns satisfy `relate_region` and `path_tracked_pred`.
    ///
    /// ## Justification
    /// When the cursor descends into a subtree, each continuation's `entry_own`
    /// was previously checked by `tree_predicate_map` in the parent's child
    /// subtree.  After descent, `map_full_tree` only covers the siblings (the
    /// taken child is `None`), so the path entries' properties are no longer
    /// covered by `map_full_tree`.  However, `regions` is unchanged since
    /// descent, so the properties still hold.
    ///
    /// NOTE(review): this lemma is currently `admit()`ed — the justification
    /// above is informal and has not been mechanized (it would require
    /// tracking the construction history of the continuations).
    pub proof fn cont_entries_relate_region(
        self, regions: MetaRegionOwners,
    )
        requires
            self.inv(),
            self.relate_region(regions),
        ensures
            forall|i: int| #![trigger self.continuations[i]]
                self.level - 1 <= i < NR_LEVELS ==> {
                    &&& self.continuations[i].entry_own.relate_region(regions)
                    &&& PageTableOwner::<C>::path_tracked_pred(regions)(
                            self.continuations[i].entry_own,
                            self.continuations[i].path())
                },
    {
        // Each entry_own was checked by tree_predicate_map in the parent's child subtree
        // before descent. After descent, the child slot is None so map_full_tree no longer
        // covers entry_owns, but regions is unchanged so the properties persist.
        // Formalizing this requires tracking the construction history of continuations.
        admit()
    }
2219
    /// Cursor path nesting: for two continuations where `i > j >= self.level - 1`,
    /// `cont_i` is an ancestor of `cont_j` in the page table tree.
    /// The path from the root to `cont_j` passes through `cont_i.idx` at level `cont_i`,
    /// i.e., `cont_j.path()[cont_i.path().len()] == cont_i.idx`.
    ///
    /// This holds because the cursor was built by descending through `cont_i.idx` at each level.
    pub proof fn cursor_path_nesting(self, i: int, j: int)
        requires
            self.inv(),
            self.level - 1 <= j < i,
            i < NR_LEVELS,
        ensures
            self.continuations[j].path().len() as int > self.continuations[i].path().len() as int,
            self.continuations[j].path().index(self.continuations[i].path().len() as int)
                == self.continuations[i].idx,
    {
        // Explicit case analysis over the finitely many (i, j) level pairs of a
        // four-continuation cursor (indices 0..=3).  Each case chains the
        // `push_tail` index/length lemmas along the descent from level i down
        // to level j; presumably inv() relates adjacent continuations by
        // `path()[k-1] == path()[k].push_tail(idx[k])` — confirm against the
        // inv definition above.
        if i == 3 && j == 2 {
            // Adjacent levels: one push_tail step.
            self.continuations[3].path().push_tail_property_index(self.continuations[3].idx as usize);
            self.continuations[3].path().push_tail_property_len(self.continuations[3].idx as usize);
        } else if i == 3 && j == 1 {
            // Two descent steps: idx3 lands at position p3.len(), preserved by
            // the later push_tail of idx2 (it only appends past that position).
            let p3 = self.continuations[3].path();
            let p2 = self.continuations[2].path();
            let idx3 = self.continuations[3].idx as usize;
            let idx2 = self.continuations[2].idx as usize;
            p3.push_tail_property_index(idx3);
            p3.push_tail_property_len(idx3);
            p2.push_tail_property_index(idx2);
            p2.push_tail_property_len(idx2);
            assert(p3.len() < p2.len());
            assert(self.continuations[1].path() == p2.push_tail(idx2));
            assert(p2.push_tail(idx2).index(p3.len() as int) == p2.index(p3.len() as int));
        } else if i == 3 && j == 0 {
            // Three descent steps: idx3's position survives both later appends.
            let p3 = self.continuations[3].path();
            let p2 = self.continuations[2].path();
            let p1 = self.continuations[1].path();
            let idx3 = self.continuations[3].idx as usize;
            let idx2 = self.continuations[2].idx as usize;
            let idx1 = self.continuations[1].idx as usize;
            p3.push_tail_property_index(idx3);
            p3.push_tail_property_len(idx3);
            p2.push_tail_property_index(idx2);
            p2.push_tail_property_len(idx2);
            p1.push_tail_property_index(idx1);
            p1.push_tail_property_len(idx1);
            assert(p3.len() < p2.len());
            assert(p3.len() < p1.len());
            assert(p1.push_tail(idx1).index(p3.len() as int) == p1.index(p3.len() as int));
            assert(p2.push_tail(idx2).index(p3.len() as int) == p2.index(p3.len() as int));
        } else if i == 2 && j == 1 {
            // Adjacent levels: one push_tail step.
            self.continuations[2].path().push_tail_property_index(self.continuations[2].idx as usize);
            self.continuations[2].path().push_tail_property_len(self.continuations[2].idx as usize);
        } else if i == 2 && j == 0 {
            // Two descent steps from level 2 down to level 0.
            let p2 = self.continuations[2].path();
            let p1 = self.continuations[1].path();
            let idx2 = self.continuations[2].idx as usize;
            let idx1 = self.continuations[1].idx as usize;
            p2.push_tail_property_index(idx2);
            p2.push_tail_property_len(idx2);
            p1.push_tail_property_index(idx1);
            p1.push_tail_property_len(idx1);
            assert(p2.len() < p1.len());
            assert(self.continuations[0].path() == p1.push_tail(idx1));
            assert(p1.push_tail(idx1).index(p2.len() as int) == p1.index(p2.len() as int));
            assert(p1 == p2.push_tail(idx2));
            assert(p2.push_tail(idx2).index(p2.len() as int) == idx2);
        } else if i == 1 && j == 0 {
            // Adjacent levels: one push_tail step.
            self.continuations[1].path().push_tail_property_index(self.continuations[1].idx as usize);
            self.continuations[1].path().push_tail_property_len(self.continuations[1].idx as usize);
        }
    }
2290
    /// Spec-level result of `set_va`: `self` with only the virtual address
    /// replaced by `new_va`; all other fields unchanged.
    pub open spec fn set_va_spec(self, new_va: AbstractVaddr) -> Self {
        Self {
            va: new_va,
            ..self
        }
    }
2297
    /// Ghost update of the cursor's virtual address (axiomatized — no body).
    ///
    /// Requires the new address to agree with the old one on all index levels
    /// at or above `level - 1`, and with the locked prefix at or above
    /// `guard_level - 1`, so only the within-node position changes.
    pub axiom fn set_va(tracked &mut self, new_va: AbstractVaddr)
        requires
            forall |i: int| #![auto] old(self).level - 1 <= i < NR_LEVELS ==> new_va.index[i] == old(self).va.index[i],
            forall |i: int| #![auto] old(self).guard_level - 1 <= i < NR_LEVELS ==> new_va.index[i] == old(self).prefix.index[i],
        ensures
            *self == old(self).set_va_spec(new_va);
2304
    /// Spec-level result of `set_va_in_node`: replaces `va` with `new_va` and
    /// updates the current continuation's `idx` to the new address's index at
    /// `level - 1` (the entry-within-node index); everything else unchanged.
    pub open spec fn set_va_in_node_spec(self, new_va: AbstractVaddr) -> Self {
        let old_cont = self.continuations[self.level - 1];
        Self {
            va: new_va,
            continuations: self.continuations.insert(
                self.level - 1,
                CursorContinuation {
                    // Only the within-node index changes; the rest of the
                    // continuation (entry_own, children, path, guard) is kept.
                    idx: new_va.index[self.level - 1] as usize,
                    ..old_cont
                },
            ),
            ..self
        }
    }
2319
    /// When jumping within the same page-table node, only indices at levels
    /// >= level are guaranteed to match. The entry-within-node index (level - 1)
    /// may change, so we update continuations[level-1].idx along with va.
    ///
    /// Axiomatized ghost update; the postcondition pins the result to
    /// `set_va_in_node_spec` and asserts the invariant is preserved.
    pub axiom fn set_va_in_node(tracked &mut self, new_va: AbstractVaddr)
        requires
            old(self).inv(),
            new_va.inv(),
            // The target stays within the same node: all strictly-higher
            // level indices are unchanged...
            forall|i: int| #![auto] old(self).level <= i < NR_LEVELS
                ==> new_va.index[i] == old(self).va.index[i],
            // ...and the target address remains inside the locked range.
            old(self).locked_range().start <= new_va.to_vaddr()
                < old(self).locked_range().end,
        ensures
            *self == old(self).set_va_in_node_spec(new_va),
            self.inv(),;
2334
    /// Spec-level constructor: a fresh owner at the root level (`NR_LEVELS`)
    /// with a single continuation at level `NR_LEVELS - 1` wrapping
    /// `owner_subtree`, positioned at root entry `idx`.
    pub open spec fn new_spec(owner_subtree: OwnerSubtree<C>, idx: usize, guard_perm: GuardPerm<'rcu, C>) -> Self {
        // Address whose top-level index is `idx` and all lower indices (and
        // the page offset) are zero — the start of root entry `idx`'s range.
        let va = AbstractVaddr {
            offset: 0,
            index: Map::new(|i: int| 0 <= i < NR_LEVELS, |i: int| 0).insert(NR_LEVELS - 1, idx as int),
        };
        Self {
            level: NR_LEVELS as PagingLevel,
            continuations: Map::empty().insert(NR_LEVELS - 1 as int, CursorContinuation::new_spec(owner_subtree, idx, guard_perm)),
            va,
            // Guarding starts at the root; prefix equals the initial address.
            guard_level: NR_LEVELS as PagingLevel,
            prefix: va,
            popped_too_high: false,
        }
    }
2349
    /// Ghost constructor (axiomatized): produces the tracked owner described
    /// by `new_spec`, consuming the root subtree and guard permission.
    pub axiom fn new(tracked owner_subtree: OwnerSubtree<C>, idx: usize, tracked guard_perm: GuardPerm<'rcu, C>)
    -> (tracked res: Self)
        ensures
            res == Self::new_spec(owner_subtree, idx, guard_perm);
2354
    /// When the cursor sits at the guard level above the locked range, any
    /// `va` within the locked range lies inside the `page_size(level + 1)`
    /// block that starts at `node_start` (the current VA aligned down to that
    /// block size) — i.e. a jump to `va` stays within the current node.
    pub proof fn jump_above_locked_range_va_in_node(
        self,
        va: Vaddr,
        node_start: Vaddr,
    )
        requires
            self.inv(),
            self.level == self.guard_level,
            self.above_locked_range(),
            self.locked_range().start <= va < self.locked_range().end,
            node_start == nat_align_down(self.va.to_vaddr() as nat,
                page_size((self.level + 1) as PagingLevel) as nat) as usize,
        ensures
            node_start <= va,
            va < node_start + page_size((self.level + 1) as PagingLevel),
    {
        let gl = self.guard_level;
        let ps_gl = page_size_spec(gl as PagingLevel) as nat;
        let ps_gl1 = page_size_spec((gl + 1) as PagingLevel) as nat;
        let pv = self.prefix.to_vaddr() as nat;
        let cv = self.va.to_vaddr() as nat;
        // Basic facts about the two page sizes and the alignments of the
        // prefix (at guard level) and the current VA (one level above).
        lemma_page_size_ge_page_size(gl as PagingLevel);
        lemma_page_size_ge_page_size((gl + 1) as PagingLevel);
        lemma_nat_align_down_sound(pv, ps_gl);
        lemma_nat_align_up_sound(pv, ps_gl);
        lemma_nat_align_down_sound(cv, ps_gl1);

        lemma_page_size_divides(gl as PagingLevel, (gl + 1) as PagingLevel);
        // Connect the abstract-index alignment of `prefix` with the concrete
        // nat_align_down/up results on its vaddr.
        self.prefix.align_down_concrete(gl as int);
        AbstractVaddr::from_vaddr_to_vaddr_roundtrip(nat_align_down(pv, ps_gl) as Vaddr);
        self.prefix.align_up_concrete(gl as int);
        AbstractVaddr::from_vaddr_to_vaddr_roundtrip(nat_align_up(pv, ps_gl) as Vaddr);
        self.prefix.align_diff(gl as int);

        if gl < NR_LEVELS {
            // Interior guard level: va and prefix share all indices at or
            // above gl + 1, so they align down to the same block start, and
            // the locked range fits inside one (gl + 1)-sized block.
            self.va.align_down_to_vaddr_eq_if_upper_indices_eq(self.prefix, (gl + 1) as int);
            self.va.align_down_concrete((gl + 1) as int);
            AbstractVaddr::from_vaddr_to_vaddr_roundtrip(nat_align_down(cv, ps_gl1) as Vaddr);
            self.prefix.align_down_concrete((gl + 1) as int);
            AbstractVaddr::from_vaddr_to_vaddr_roundtrip(nat_align_down(pv, ps_gl1) as Vaddr);
            lemma_page_size_ge_page_size(gl as PagingLevel);
            lemma_page_size_ge_page_size((gl + 1) as PagingLevel);
            lemma_nat_align_down_monotone(pv, ps_gl, ps_gl1);
            lemma_nat_align_down_within_block(pv, ps_gl, ps_gl1);
        } else {
            // guard_level == NR_LEVELS: ps_gl1 covers the entire VA space.
            crate::specs::arch::paging_consts::lemma_nr_subpage_per_huge_eq_nr_entries();
            vstd_extra::external::ilog2::lemma_usize_ilog2_to32();
            vstd::arithmetic::power2::lemma2_to64();
            vstd::arithmetic::power2::lemma2_to64_rest();
            // page_size at level 5 = 2^12 * 2^36 = 2^48, the full VA width.
            assert(page_size_spec(5) == pow2(48nat) as usize) by {
                vstd::arithmetic::power2::lemma_pow2_adds(12nat, 36nat);
            };
            self.va.to_vaddr_bounded();
            self.va.to_vaddr_indices_gap_bound(0);
            // The only multiple of 2^48 below the VA bound is 0, so the node
            // spans from address 0 over the whole space.
            assert(node_start == 0) by {
                if nat_align_down(cv, ps_gl1) != 0 {
                    vstd::arithmetic::div_mod::lemma_small_mod(
                        nat_align_down(cv, ps_gl1),
                        ps_gl1,
                    );
                }
            };
            self.prefix.to_vaddr_indices_gap_bound(0);
        }
    }
2421
    /// If `va` lies inside the locked range but outside a `page_size(level+1)`
    /// block `[node_start, node_start + page_size(level+1))` that is itself
    /// contained in the locked range, then `level + 1` must be strictly below
    /// the guard level.
    ///
    /// Proof sketch: were `level + 1 == guard_level`, the locked range would
    /// fit within one guard-level block (by the prefix alignment facts), so a
    /// guard-level-sized block inside the range would cover all of it —
    /// contradicting `va` falling outside the block.
    pub proof fn jump_not_in_node_level_lt_guard_minus_one(
        self,
        level: PagingLevel,
        va: Vaddr,
        node_start: Vaddr,
    )
        requires
            self.inv(),
            self.locked_range().start <= va < self.locked_range().end,
            1 <= level,
            level + 1 <= self.guard_level,
            self.locked_range().start <= node_start,
            node_start + page_size((level + 1) as PagingLevel) <= self.locked_range().end,
            !(node_start <= va && va < node_start + page_size((level + 1) as PagingLevel)),
        ensures
            level + 1 < self.guard_level,
    {
        // Only the boundary case needs work; it is refuted by relating the
        // prefix's align-down/align-up bounds at the guard level.
        if level + 1 == self.guard_level {
            let pv = self.prefix.to_vaddr() as nat;
            let ps = page_size(self.guard_level as PagingLevel) as nat;
            self.prefix.align_down_concrete(self.guard_level as int);
            self.prefix.align_up_concrete(self.guard_level as int);
            self.prefix.align_diff(self.guard_level as int);
            AbstractVaddr::from_vaddr_to_vaddr_roundtrip(nat_align_down(pv, ps) as Vaddr);
            AbstractVaddr::from_vaddr_to_vaddr_roundtrip(nat_align_up(pv, ps) as Vaddr);
        }
    }
2449}
2450
/// Abstract view of a cursor: the current virtual address together with the
/// set of mappings it exposes.
pub tracked struct CursorView<C: PageTableConfig> {
    /// Virtual address the cursor currently points at.
    pub cur_va: Vaddr,
    /// Mappings visible through the cursor (see `Inv` below for the
    /// finiteness and disjointness conditions imposed on this set).
    pub mappings: Set<Mapping>,
    /// Carries the `C` type parameter; no runtime data.
    pub phantom: PhantomData<C>,
}
2456
/// Project a `CursorOwner` to its abstract `CursorView`: the concrete
/// current VA plus the mapping set derived from the owned subtrees.
impl<'rcu, C: PageTableConfig> View for CursorOwner<'rcu, C> {
    type V = CursorView<C>;

    open spec fn view(&self) -> Self::V {
        CursorView {
            cur_va: self.cur_va(),
            mappings: self.view_mappings(),
            phantom: PhantomData,
        }
    }
}
2468
/// Invariant on the abstract cursor view.
impl<C: PageTableConfig> Inv for CursorView<C> {
    open spec fn inv(self) -> bool {
        // The cursor address stays within userspace.
        &&& self.cur_va < MAX_USERSPACE_VADDR
        // The mapping set is finite and each mapping is well-formed.
        &&& self.mappings.finite()
        &&& forall|m: Mapping| #![auto] self.mappings.contains(m) ==> m.inv()
        // Distinct mappings occupy disjoint (non-overlapping) VA ranges.
        &&& forall|m: Mapping, n: Mapping| #![auto]
            self.mappings.contains(m) ==>
            self.mappings.contains(n) ==>
            m != n ==>
            m.va_range.end <= n.va_range.start || n.va_range.end <= m.va_range.start
    }
}
2481
/// NOTE(review): this obligation is currently admitted — the claim that a
/// cursor owner's view satisfies `CursorView::inv` is asserted via `admit()`
/// and has no mechanized proof yet.
impl<'rcu, C: PageTableConfig> InvView for CursorOwner<'rcu, C> {
    proof fn view_preserves_inv(self) {
        assert(self@.inv()) by { admit() };
    }
}
2487
/// Invariant on the concrete (exec-level) cursor.
impl<'rcu, C: PageTableConfig, A: InAtomicMode> Inv for Cursor<'rcu, C, A> {
    open spec fn inv(self) -> bool {
        // Level ordering: 1 <= level <= guard_level <= NR_LEVELS.
        &&& 1 <= self.level <= NR_LEVELS
        &&& self.level <= self.guard_level <= NR_LEVELS
//        &&& forall|i: int| 0 <= i < self.guard_level - self.level ==> self.path[i] is Some
        // The current VA stays at or above the lower lock barrier and is
        // page-aligned.
        &&& self.va >= self.barrier_va.start
        &&& self.va % PAGE_SIZE == 0
    }
}
2497
/// Well-formedness relation between a concrete `Cursor` and its ghost
/// `CursorOwner`: addresses, levels, barrier range, and — for every level the
/// cursor has descended to — agreement between the exec-side `path` guard
/// pointer and the owner's continuation guard permission.
impl<'rcu, C: PageTableConfig, A: InAtomicMode> OwnerOf for Cursor<'rcu, C, A> {
    type Owner = CursorOwner<'rcu, C>;

    open spec fn wf(self, owner: Self::Owner) -> bool {
        // The ghost VA reflects the concrete VA; levels agree.
        &&& owner.va.reflect(self.va)
        &&& self.level == owner.level
        &&& owner.guard_level == self.guard_level
//        &&& owner.index() == self.va % page_size(self.level)
        // For each level index k in 3..=0: once the cursor has reached level
        // k + 1 or below, path[k] holds a guard whose pointer matches the
        // owner's continuation at k. (Unrolled per level; NR_LEVELS == 4.)
        &&& self.level <= 4 ==> {
            &&& self.path[3] is Some
            &&& owner.continuations.contains_key(3)
            &&& owner.continuations[3].guard_perm.pptr() == self.path[3].unwrap()
        }
        &&& self.level <= 3 ==> {
            &&& self.path[2] is Some
            &&& owner.continuations.contains_key(2)
            &&& owner.continuations[2].guard_perm.pptr() == self.path[2].unwrap()
        }
        &&& self.level <= 2 ==> {
            &&& self.path[1] is Some
            &&& owner.continuations.contains_key(1)
            &&& owner.continuations[1].guard_perm.pptr() == self.path[1].unwrap()
        }
        &&& self.level == 1 ==> {
            &&& self.path[0] is Some
            &&& owner.continuations.contains_key(0)
            &&& owner.continuations[0].guard_perm.pptr() == self.path[0].unwrap()
        }
        // The concrete barrier range equals the owner's locked range.
        &&& self.barrier_va.start == owner.locked_range().start
        &&& self.barrier_va.end == owner.locked_range().end
    }
}
2530
/// Marker impl with no items: presumably `ModelOf` derives the cursor's model
/// from the `OwnerOf` relation above together with the owner's `View` impl —
/// confirm against the `ModelOf` trait definition.
impl<'rcu, C: PageTableConfig, A: InAtomicMode> ModelOf for Cursor<'rcu, C, A> {

}
2534
2535} // verus!