// File: ostd/specs/mm/page_table/node/entry_owners.rs

1use vstd::prelude::*;
2
3use vstd::cell;
4use vstd::simple_pptr::PointsTo;
5use vstd_extra::array_ptr;
6use vstd_extra::ghost_tree::*;
7use vstd_extra::ownership::*;
8
9use crate::mm::frame::meta::mapping::{frame_to_index, meta_addr, meta_to_frame};
10use crate::mm::frame::meta::MetaSlot;
11use crate::mm::frame::meta::REF_COUNT_UNUSED;
12use crate::mm::page_prop::PageProperty;
13use crate::mm::page_table::*;
14use crate::mm::{Paddr, PagingConstsTrait, PagingLevel, Vaddr};
15use crate::specs::arch::mm::{NR_ENTRIES, NR_LEVELS, PAGE_SIZE};
16use crate::specs::arch::paging_consts::PagingConsts;
17use crate::specs::arch::*;
18use crate::specs::mm::frame::meta_region_owners::MetaRegionOwners;
19use crate::specs::mm::page_table::node::entry_view::*;
20use crate::specs::mm::page_table::*;
21use core::marker::PhantomData;
22
23verus! {
24
/// Ghost ownership record for a leaf (frame) page-table entry.
pub tracked struct FrameEntryOwner {
    // Physical address the entry maps (see `match_pte`: equals `pte.paddr()`).
    pub mapped_pa: usize,
    // Size of the mapping in bytes; set to `page_size(parent_level)` at
    // construction (see `new_frame_spec`).
    pub size: usize,
    // Page property of the mapping; must agree with `pte.prop()` (`match_pte`).
    pub prop: PageProperty,
}
30
/// Ghost owner for a single page-table entry. The constructors
/// (`new_absent_spec` / `new_frame_spec` / `new_node_spec`) populate at most
/// one of `node` / `frame`, with `absent` marking an empty entry.
pub tracked struct EntryOwner<C: PageTableConfig> {
    // Ownership of the child page-table node, when the entry points to one.
    pub node: Option<NodeOwner<C>>,
    // Ownership data for a mapped frame, when the entry is a leaf mapping.
    pub frame: Option<FrameEntryOwner>,
    // Ghost sequence of frame views; NOTE(review): presumably the frames of a
    // locked subtree stashed here while locked — confirm against lock code.
    pub locked: Option<Ghost<Seq<FrameView<C>>>>,
    // True iff the entry is empty (neither node nor frame).
    pub absent: bool,
    // Whether the entry is inside the current scope; drives
    // `expected_raw_count` (0 when in scope, 1 otherwise).
    pub in_scope: bool,
    // Position of this entry in the ghost page-table tree.
    pub path: TreePath<NR_ENTRIES>,
    // Paging level of the parent node that holds this entry.
    pub parent_level: PagingLevel,
}
40
41impl<C: PageTableConfig> EntryOwner<C> {
    /// True iff this entry owns a child page-table node.
    pub open spec fn is_node(self) -> bool {
        self.node is Some
    }
45
    /// True iff this entry is a leaf mapping to a frame.
    pub open spec fn is_frame(self) -> bool {
        self.frame is Some
    }
49
    /// True iff this entry carries the ghost state of a locked subtree.
    pub open spec fn is_locked(self) -> bool {
        self.locked is Some
    }
53
    /// True iff this entry is empty (maps nothing).
    pub open spec fn is_absent(self) -> bool {
        self.absent
    }
57
    /// Spec-level constructor for an empty (absent) entry at `path` under a
    /// parent of level `parent_level`. The entry starts in scope.
    pub open spec fn new_absent_spec(path: TreePath<NR_ENTRIES>, parent_level: PagingLevel) -> Self {
        EntryOwner {
            node: None,
            frame: None,
            locked: None,
            absent: true,
            in_scope: true,
            path,
            parent_level,
        }
    }
69
    /// Spec-level constructor for a leaf entry mapping `paddr` with property
    /// `prop`. The mapping size is the full page size of `parent_level`.
    pub open spec fn new_frame_spec(paddr: Paddr, path: TreePath<NR_ENTRIES>, parent_level: PagingLevel, prop: PageProperty) -> Self {
        EntryOwner {
            node: None,
            frame: Some(FrameEntryOwner { mapped_pa: paddr, size: page_size(parent_level), prop }),
            locked: None,
            absent: false,
            in_scope: true,
            path,
            parent_level,
        }
    }
81
    /// Spec-level constructor for an entry owning child node `node` at `path`.
    /// The parent is one level above the child: `parent_level = node.level + 1`.
    pub open spec fn new_node_spec(node: NodeOwner<C>, path: TreePath<NR_ENTRIES>) -> Self {
        EntryOwner {
            node: Some(node),
            frame: None,
            locked: None,
            absent: false,
            in_scope: true,
            path,
            parent_level: (node.level + 1) as PagingLevel,
        }
    }
93
    /// Axiom: conjures a tracked absent-entry owner equal to `new_absent_spec`.
    pub axiom fn new_absent(path: TreePath<NR_ENTRIES>, parent_level: PagingLevel) -> tracked Self
        returns Self::new_absent_spec(path, parent_level);
96
    /// Axiom: conjures a tracked frame-entry owner equal to `new_frame_spec`.
    pub axiom fn new_frame(paddr: Paddr, path: TreePath<NR_ENTRIES>, parent_level: PagingLevel, prop: PageProperty) -> tracked Self
        returns Self::new_frame_spec(paddr, path, parent_level, prop);
99
    /// Axiom: conjures a tracked node-entry owner equal to `new_node_spec`.
    pub axiom fn new_node(node: NodeOwner<C>, path: TreePath<NR_ENTRIES>) -> tracked Self
        returns Self::new_node_spec(node, path);
102
    /// Creates a ghost entry owner for mapping an untracked (device memory) frame.
    /// Unlike `new_frame`, this does not consume a slot permission from the meta region,
    /// since device memory PAs are outside the tracked frame allocator.
    /// The actual mapping correctness is guaranteed by the caller's `unsafe` contract.
    ///
    /// The `requires` reflect properties guaranteed by `collect_largest_pages` postconditions,
    /// so this axiom is only ever called with values that satisfy them.
    ///
    /// Ensures the result is a frame entry carrying exactly `paddr`/`prop`/
    /// `page_size(parent_level)`, with a valid path, in scope, satisfying the
    /// base invariant, and well-formed as a `Child::Frame`.
    pub axiom fn new_untracked_frame(
        paddr: Paddr,
        parent_level: PagingLevel,
        prop: PageProperty,
    ) -> (tracked res: Self)
        requires
            paddr % PAGE_SIZE == 0,
            paddr < MAX_PADDR,
            1 <= parent_level,
            parent_level <= NR_LEVELS,
        ensures
            res.is_frame(),
            res.frame.unwrap().mapped_pa == paddr,
            res.frame.unwrap().prop == prop,
            res.frame.unwrap().size == page_size(parent_level),
            res.parent_level == parent_level,
            res.path.inv(),
            res.in_scope,
            res.inv_base(),
            crate::mm::page_table::Child::<C>::Frame(paddr, parent_level, prop).wf(res);
130
    /// Consistency between this ghost owner and a concrete PTE at `parent_level`:
    /// absent PTEs pair with absent owners, present intermediate PTEs with node
    /// owners (linked through the meta mapping), and present last-level PTEs
    /// with frame owners carrying the same paddr and property.
    pub open spec fn match_pte(self, pte: C::E, parent_level: PagingLevel) -> bool {
        // The PTE's target must be a page-aligned physical address below MAX_PADDR.
        &&& pte.paddr() % PAGE_SIZE == 0
        &&& pte.paddr() < MAX_PADDR
        // Not present: owner is absent, and above level 1 the PTE is not "last".
        &&& !pte.is_present() ==> {
            &&& self.is_absent()
            &&& parent_level > 1 ==> !pte.is_last(parent_level)
        }
        // Present and non-terminal: owner holds a node whose meta slot maps
        // back to the PTE's target frame.
        &&& pte.is_present() && !pte.is_last(parent_level) ==> {
            &&& self.is_node()
            &&& meta_to_frame(self.node.unwrap().meta_perm.addr()) == pte.paddr()
        }
        // Present and terminal: owner records the mapped frame and its property.
        &&& pte.is_present() && pte.is_last(parent_level) ==> {
            &&& self.is_frame()
            &&& self.frame.unwrap().mapped_pa == pte.paddr()
            &&& self.frame.unwrap().prop == pte.prop()
        }
    }
148
    /// When owner is absent and pte is the absent PTE with valid paddr, match_pte holds.
    pub proof fn absent_match_pte(owner: Self, pte: C::E, parent_level: PagingLevel)
        requires
            owner.is_absent(),
            pte == C::E::new_absent_spec(),
            pte.paddr() % PAGE_SIZE == 0,
            pte.paddr() < MAX_PADDR,
        ensures
            owner.match_pte(pte, parent_level),
    {
        // Bring in the defining facts of `new_absent_spec`; the non-presence
        // assertion below relies on them.
        C::E::new_properties();
        assert(!pte.is_present());
        if parent_level > 1 {
            assert(!pte.is_last(parent_level));
        }
        // The two "present" arms of `match_pte` are vacuous: each branch
        // carries contradictory assertions about `pte.is_present()`.
        if pte.is_present() && !pte.is_last(parent_level) {
            assert(pte.is_present());
            assert(!pte.is_present());
        }
        if pte.is_present() && pte.is_last(parent_level) {
            assert(pte.is_present());
            assert(!pte.is_present());
        }
    }
173
    /// A matching, last-level PTE above level 1 forces the frame arm of
    /// `match_pte`: the owner is a frame with the PTE's paddr and property.
    pub proof fn last_pte_implies_frame_match(self, pte: C::E, parent_level: PagingLevel)
        requires
            self.inv(),
            self.match_pte(pte, parent_level),
            1 < parent_level,
            pte.is_last(parent_level),
        ensures
            self.is_frame(),
            self.frame.unwrap().mapped_pa == pte.paddr(),
            self.frame.unwrap().prop == pte.prop(),
    {
        // Rule out the absent case: `match_pte` says a non-present PTE above
        // level 1 is not "last", contradicting `pte.is_last(parent_level)`.
        if !pte.is_present() {
            assert(self.is_absent());
            assert(!pte.is_last(parent_level));
            assert(false);
        }
        // Present + last: the frame arm of `match_pte` gives the conclusions.
        assert(self.is_frame());
        assert(self.frame.unwrap().mapped_pa == pte.paddr());
        assert(self.frame.unwrap().prop == pte.prop());
    }
194
    /// For a huge frame at `parent_level` (2 or 3, by the level bounds), the
    /// `idx`-th child at the next-lower level starts below `MAX_PADDR`, is
    /// aligned to the child page size, fits entirely below `MAX_PADDR`, and is
    /// 4KB-aligned — the facts needed to split the huge mapping entry-wise.
    pub proof fn huge_frame_split_child_at(self, regions: MetaRegionOwners, idx: usize)
        requires
            self.inv(),
            self.is_frame(),
            regions.inv(),
            1 < self.parent_level < NR_LEVELS,
            idx < NR_ENTRIES,
        ensures
            self.frame.unwrap().mapped_pa + idx * page_size((self.parent_level - 1) as PagingLevel) < MAX_PADDR,
            ((self.frame.unwrap().mapped_pa + idx * page_size((self.parent_level - 1) as PagingLevel)) as Paddr)
                % page_size((self.parent_level - 1) as PagingLevel) == 0,
            ((self.frame.unwrap().mapped_pa + idx * page_size((self.parent_level - 1) as PagingLevel)) as Paddr)
                + page_size((self.parent_level - 1) as PagingLevel) <= MAX_PADDR,
            ((self.frame.unwrap().mapped_pa + idx * page_size((self.parent_level - 1) as PagingLevel)) as Paddr) % PAGE_SIZE == 0,
    {
        let pa = self.frame.unwrap().mapped_pa;
        let child_pa = (pa + idx * page_size((self.parent_level - 1) as PagingLevel)) as Paddr;
        assert(self.parent_level == 2 || self.parent_level == 3);
        // Pin down the paging geometry: 512 entries per table, ilog2 = 9.
        assert(NR_ENTRIES == 512) by {
            crate::specs::arch::paging_consts::lemma_nr_subpage_per_huge_eq_nr_entries();
        };
        assert(crate::mm::nr_subpage_per_huge::<PagingConsts>() == 512usize) by {
            crate::specs::arch::paging_consts::lemma_nr_subpage_per_huge_eq_nr_entries();
        };
        vstd_extra::external::ilog2::lemma_usize_ilog2_to32();
        crate::specs::mm::page_table::cursor::page_size_lemmas::lemma_page_size_spec_level1();
        assert(512usize.ilog2() == 9);
        assert(crate::mm::nr_subpage_per_huge::<PagingConsts>().ilog2() == 512usize.ilog2());
        vstd::arithmetic::power2::lemma2_to64();
        if self.parent_level == 2 {
            // 2MB parent split into 4KB children.
            assert(page_size_spec(1) == 4096);
            assert(page_size_spec(2) == (PAGE_SIZE * pow2((512usize.ilog2() * 1usize) as nat)) as usize);
            assert(page_size_spec(2) == (4096 * pow2(9)) as usize);
            assert(page_size_spec(2) == 2097152);
            assert(pa % page_size(2) == 0);
            // page_size(1) divides page_size(2), so child offsets preserve 4KB alignment.
            crate::specs::mm::page_table::cursor::page_size_lemmas::lemma_page_size_divides(1, 2);
            assert(child_pa % page_size(1) == 0);
            // Bound: the child's full extent stays within the parent's 2MB range.
            assert(child_pa + page_size(1) <= MAX_PADDR) by {
                assert(idx < 512);
                assert(idx * 4096 + 4096 <= 2097152);
                assert(child_pa + page_size(1) <= pa + page_size(2));
            };
        } else {
            // 1GB parent split into 2MB children.
            assert(self.parent_level == 3);
            assert(page_size_spec(2) == (PAGE_SIZE * pow2((512usize.ilog2() * 1usize) as nat)) as usize);
            assert(page_size_spec(2) == (4096 * pow2(9)) as usize);
            assert(page_size_spec(2) == 2097152);
            assert(page_size_spec(3) == (PAGE_SIZE * pow2((512usize.ilog2() * 2usize) as nat)) as usize);
            assert(page_size_spec(3) == (4096 * pow2(18)) as usize);
            assert(page_size_spec(3) == 1073741824);
            assert(pa % page_size(3) == 0);
            assert(pa % PAGE_SIZE == 0);
            crate::specs::mm::page_table::cursor::page_size_lemmas::lemma_va_align_page_size(pa, 2);
            assert(child_pa == pa + idx * page_size(2));
            // Modular arithmetic: pa ≡ 0 and idx * page_size(2) ≡ 0 (mod page_size(2)),
            // so their sum is page_size(2)-aligned.
            vstd::arithmetic::div_mod::lemma_mod_multiples_basic(idx as int, page_size(2) as int);
            vstd::arithmetic::div_mod::lemma_add_mod_noop(
                pa as int,
                (idx * page_size(2)) as int,
                page_size(2) as int,
            );
            assert(child_pa % page_size(2) == 0);
            // Bound: the child's full extent stays within the parent's 1GB range.
            assert(child_pa + page_size(2) <= MAX_PADDR) by {
                assert(idx < 512);
                assert(idx * 2097152 + 2097152 <= 1073741824);
                assert(child_pa + page_size(2) <= pa + page_size(3));
            };
        }
        assert(child_pa < MAX_PADDR);
        assert(child_pa % PAGE_SIZE == 0);
    }
265
    /// Expected `raw_count` on this entry's meta slot (cf. `metaregion_sound`):
    /// 0 while the entry is in scope, 1 otherwise.
    pub open spec fn expected_raw_count(self) -> usize {
        if self.in_scope {
            0
        } else {
            1
        }
    }
273
    /// Helper: sub-page validity is preserved when the only slot that changed is the
    /// frame's own slot (and slots map and other slot owners are unchanged).
    /// Key step: every sub-page index (j > 0) differs from the frame's own index,
    /// so its slot owner is untouched between `r0` and `r1`.
    pub proof fn frame_sub_pages_valid_preserved_at_own_slot(
        self,
        r0: MetaRegionOwners,
        r1: MetaRegionOwners,
    )
        requires
            self.inv(),
            r0.inv(),
            self.is_frame(),
            self.parent_level <= NR_LEVELS,
            self.frame_sub_pages_valid(r0),
            r0.slots == r1.slots,
            r0.slot_owners.dom() =~= r1.slot_owners.dom(),
            forall |i: usize| #![trigger r1.slot_owners[i]]
                i != frame_to_index(self.meta_slot_paddr().unwrap())
                    && r0.slot_owners.contains_key(i)
                    ==> r0.slot_owners[i] == r1.slot_owners[i],
        ensures
            self.frame_sub_pages_valid(r1),
    {
        if self.parent_level > 1 {
            let pa = self.frame.unwrap().mapped_pa;
            let nr_pages = page_size(self.parent_level) / PAGE_SIZE;
            let self_idx = frame_to_index(self.meta_slot_paddr().unwrap());
            assert forall |j: usize| #![trigger frame_to_index((pa + j * PAGE_SIZE) as usize)]
                0 < j < nr_pages implies {
                let sub_idx = frame_to_index((pa + j * PAGE_SIZE) as usize);
                &&& r1.slots.contains_key(sub_idx)
                &&& r1.slot_owners[sub_idx].inner_perms.ref_count.value() != REF_COUNT_UNUSED
            } by {
                let sub_idx = frame_to_index((pa + j * PAGE_SIZE) as usize);
                // From frame_sub_pages_valid(r0):
                assert(r0.slots.contains_key(sub_idx));
                assert(r0.slot_owners[sub_idx].inner_perms.ref_count.value() != REF_COUNT_UNUSED);
                // sub_idx != self_idx by arithmetic: pa is PAGE_SIZE-aligned, so
                // self_idx = pa / PAGE_SIZE, and sub_idx = (pa + j*PAGE_SIZE) / PAGE_SIZE
                //         = pa/PAGE_SIZE + j = self_idx + j > self_idx (since j >= 1).
                let pa_plus_int: int = pa as int + (j as int) * (PAGE_SIZE as int);
                // Cast safety: pa_plus_int < pa + page_size(parent_level) <= MAX_PADDR.
                crate::specs::mm::page_table::cursor::page_size_lemmas::
                    lemma_page_size_ge_page_size(self.parent_level);
                assert((j as int) * (PAGE_SIZE as int) < (nr_pages as int) * (PAGE_SIZE as int)) by {
                    vstd::arithmetic::mul::lemma_mul_strict_inequality(
                        j as int, nr_pages as int, PAGE_SIZE as int);
                };
                // nr_pages * PAGE_SIZE == page_size(parent_level)
                crate::specs::mm::page_table::cursor::page_size_lemmas::
                    lemma_page_size_div_mul_eq(self.parent_level);
                assert(pa_plus_int < MAX_PADDR);
                // sub_idx = (pa + j*PAGE_SIZE) / PAGE_SIZE = pa/PAGE_SIZE + j (since pa % PAGE_SIZE == 0).
                vstd::arithmetic::div_mod::lemma_div_multiples_vanish_quotient(
                    j as int, pa as int, PAGE_SIZE as int);
                assert(self_idx as int == pa as int / PAGE_SIZE as int);
                assert(sub_idx != self_idx);
                // Therefore the sub-slot owner is unchanged, transferring validity to r1.
                assert(r0.slot_owners.contains_key(sub_idx));
                assert(r0.slot_owners[sub_idx] == r1.slot_owners[sub_idx]);
            }
        }
    }
336
    /// Sub-page slot validity for huge frames (fine-grained: all 4KB pages within).
    ///
    /// When a frame at this entry has `parent_level > 1`, it is a huge page covering
    /// `page_size(parent_level)` bytes. Every 4KB sub-page within this range (excluding
    /// the j = 0 case which coincides with the frame's own slot) must be allocated
    /// (in the free pool) with `rc != UNUSED` and `rc > 0`.
    ///
    /// The fine-grained form (over `j * PAGE_SIZE` rather than `j * page_size(L-1)`) is
    /// what enables the recursive split case in `split_if_mapped_huge`: when splitting a
    /// 1GB frame into 2MB sub-frames, each 2MB sub-frame's own sub-page validity (over
    /// its 511 4KB sub-sub-pages) follows from the 1GB frame's fine-grained validity over
    /// the corresponding subrange of indices.
    pub open spec fn frame_sub_pages_valid(self, regions: MetaRegionOwners) -> bool {
        self.is_frame() && self.parent_level > 1 ==> {
            let pa = self.frame.unwrap().mapped_pa;
            // Number of 4KB pages covered by this huge mapping.
            let nr_pages = page_size(self.parent_level) / PAGE_SIZE;
            forall |j: usize| #![trigger frame_to_index((pa + j * PAGE_SIZE) as usize)]
                0 < j < nr_pages ==> {
                let sub_idx = frame_to_index((pa + j * PAGE_SIZE) as usize);
                &&& regions.slots.contains_key(sub_idx)
                &&& regions.slot_owners[sub_idx].inner_perms.ref_count.value() != REF_COUNT_UNUSED
                &&& regions.slot_owners[sub_idx].inner_perms.ref_count.value() > 0
            }
        }
    }
362
    /// Consistency of this entry with the global meta-region ghost state.
    /// Nodes: the slot owner at the node's index is in use, its `raw_count`
    /// matches `expected_raw_count`, its address matches the node's meta
    /// permission, the stored value is well-formed, and its path set is exactly
    /// `{self.path}` (so no two tree nodes share a slot index).
    /// Frames: the slot is in the free-pool map, initialized, well-formed,
    /// has a positive in-use ref count, records this entry's path, and all 4KB
    /// sub-pages of a huge frame are valid. Other entries impose no constraint.
    pub open spec fn metaregion_sound(self, regions: MetaRegionOwners) -> bool {
        if self.is_node() {
            let idx = frame_to_index(self.meta_slot_paddr().unwrap());
            &&& regions.slot_owners[idx].inner_perms.ref_count.value() != REF_COUNT_UNUSED
            &&& regions.slot_owners[idx].raw_count == self.expected_raw_count()
            &&& regions.slot_owners[idx].self_addr == self.node.unwrap().meta_perm.addr()
            &&& self.node.unwrap().meta_perm.points_to.value().wf(regions.slot_owners[idx])
            // Node path tracking: ensures no two tree nodes share the same slot index.
            &&& regions.slot_owners[idx].paths_in_pt == set![self.path]
        } else if self.is_frame() {
            let idx = frame_to_index(self.meta_slot_paddr().unwrap());
            &&& regions.slots.contains_key(idx)
            &&& regions.slots[idx].addr() == meta_addr(idx)
            &&& regions.slots[idx].is_init()
            &&& regions.slots[idx].value().wf(regions.slot_owners[idx])
            &&& regions.slot_owners[idx].inner_perms.ref_count.value() != REF_COUNT_UNUSED
            &&& regions.slot_owners[idx].inner_perms.ref_count.value() > 0
            &&& regions.slot_owners[idx].paths_in_pt.contains(self.path)
            &&& self.frame_sub_pages_valid(regions)
        } else {
            true
        }
    }
386
    /// PointsTo uniqueness: if meta slot `free_idx` is in the free pool (`regions.slots`),
    /// no active page table NODE entry can own a PointsTo at the same slot address.
    /// Justified by Verus's linear ownership of `PointsTo<MetaSlot>`:
    /// the node's PointsTo is either in `regions.slots` OR held by the node, never both.
    /// (Frames no longer own their PointsTo — they read from `regions.slots` directly.)
    pub axiom fn active_entry_not_in_free_pool(
        entry: Self,
        regions: MetaRegionOwners,
        free_idx: usize,
    )
        requires
            regions.inv(),
            entry.inv(),
            entry.is_node(),
            entry.metaregion_sound(regions),
            regions.slots.contains_key(free_idx),
        ensures
            frame_to_index(entry.meta_slot_paddr().unwrap()) != free_idx;
405
    /// Axiom: yields this entry's tree path as a tracked value.
    pub axiom fn get_path(self) -> tracked TreePath<NR_ENTRIES>
        returns self.path;
408
    /// Physical address keying this entry's meta slot: for nodes, the frame
    /// derived from the node's meta permission address; for frames, the mapped
    /// paddr; `None` for absent/other entries.
    pub open spec fn meta_slot_paddr(self) -> Option<Paddr> {
        if self.is_node() {
            Some(meta_to_frame(self.node.unwrap().meta_perm.addr()))
        } else if self.is_frame() {
            Some(self.frame.unwrap().mapped_pa)
        } else {
            None
        }
    }
418
    /// The two entries' meta-slot paddrs differ whenever both are defined.
    /// Vacuously true if either side has no meta slot (`None`).
    pub open spec fn meta_slot_paddr_neq(self, other: Self) -> bool {
        self.meta_slot_paddr() is Some ==>
        other.meta_slot_paddr() is Some ==>
        self.meta_slot_paddr().unwrap() != other.meta_slot_paddr().unwrap()
    }
424
    /// `metaregion_sound` transfers when `slot_owners` matches and `slots` is a superset.
    /// For nodes: only `slot_owners` matters. For frames: `slots.contains_key` and `slots[idx]`
    /// must be preserved, which holds when `slots` is a superset with values unchanged.
    /// (Empty body: the SMT solver discharges the transfer directly from the preconditions.)
    pub proof fn metaregion_sound_slot_owners_only(self, r0: MetaRegionOwners, r1: MetaRegionOwners)
        requires
            self.metaregion_sound(r0),
            r0.slot_owners == r1.slot_owners,
            forall |k: usize| r0.slots.contains_key(k) ==> #[trigger] r1.slots.contains_key(k),
            forall |k: usize| r0.slots.contains_key(k) ==> r0.slots[k] == #[trigger] r1.slots[k],
        ensures
            self.metaregion_sound(r1),
    {
    }
438
    /// If `metaregion_sound(r0)` holds and `r1` differs from `r0` only at one slot index
    /// that this entry does not reference, then `metaregion_sound(r1)` also holds.
    /// (Empty body: the SMT solver discharges the transfer directly from the preconditions.)
    pub proof fn metaregion_sound_one_slot_changed(self, r0: MetaRegionOwners, r1: MetaRegionOwners, changed_idx: usize)
        requires
            self.metaregion_sound(r0),
            forall |i: usize| #![trigger r1.slot_owners[i]]
                i != changed_idx ==> r0.slot_owners[i] == r1.slot_owners[i],
            r0.slot_owners.dom() =~= r1.slot_owners.dom(),
            // slots preserved at the entry's index (frames read from slots)
            forall |k: usize| r0.slots.contains_key(k) ==> #[trigger] r1.slots.contains_key(k),
            forall |k: usize| r0.slots.contains_key(k) ==> r0.slots[k] == #[trigger] r1.slots[k],
            self.meta_slot_paddr() is Some ==>
                frame_to_index(self.meta_slot_paddr().unwrap()) != changed_idx,
            // For huge frames: if changed_idx is one of this frame's 4KB sub-page slots,
            // the sub-page validity at changed_idx must still hold in r1.
            self.is_frame() && self.parent_level > 1 ==> {
                let pa = self.frame.unwrap().mapped_pa;
                let nr_pages = page_size(self.parent_level) / PAGE_SIZE;
                forall |j: usize| 0 < j < nr_pages ==> {
                    let sub_idx = #[trigger] frame_to_index((pa + j * PAGE_SIZE) as usize);
                    sub_idx != changed_idx
                    || (
                        r1.slots.contains_key(sub_idx)
                        && r1.slot_owners[sub_idx].inner_perms.ref_count.value() != REF_COUNT_UNUSED
                        && r1.slot_owners[sub_idx].inner_perms.ref_count.value() > 0
                    )
                }
            },
        ensures
            self.metaregion_sound(r1),
    {
    }
471
    /// `metaregion_sound` is preserved when only `paths_in_pt` changes at a slot,
    /// `slots` is unchanged, and the new `paths_in_pt` is correct for any node at that index.
    pub proof fn metaregion_sound_paths_in_pt_changed(self, r0: MetaRegionOwners, r1: MetaRegionOwners, changed_idx: usize)
        requires
            self.inv(),
            r0.inv(),
            self.metaregion_sound(r0),
            r0.slots == r1.slots,
            r0.slot_owners.dom() =~= r1.slot_owners.dom(),
            // All slots other than changed_idx are entirely unchanged.
            forall |i: usize| #![trigger r1.slot_owners[i]]
                i != changed_idx ==> r0.slot_owners[i] == r1.slot_owners[i],
            // At changed_idx, only paths_in_pt differs.
            r1.slot_owners[changed_idx].inner_perms == r0.slot_owners[changed_idx].inner_perms,
            r1.slot_owners[changed_idx].self_addr == r0.slot_owners[changed_idx].self_addr,
            r1.slot_owners[changed_idx].raw_count == r0.slot_owners[changed_idx].raw_count,
            r1.slot_owners[changed_idx].usage == r0.slot_owners[changed_idx].usage,
            // For nodes at changed_idx: the new paths_in_pt must match this entry's path.
            self.is_node() && self.meta_slot_paddr() is Some
                && frame_to_index(self.meta_slot_paddr().unwrap()) == changed_idx
                ==> r1.slot_owners[changed_idx].paths_in_pt == set![self.path],
            // For frames at changed_idx: the new paths_in_pt must still contain this entry's path.
            self.is_frame() && self.meta_slot_paddr() is Some
                && frame_to_index(self.meta_slot_paddr().unwrap()) == changed_idx
                ==> r1.slot_owners[changed_idx].paths_in_pt.contains(self.path),
            // For huge frames: if changed_idx is one of this frame's sub-page slots (j > 0),
            // the new paths_in_pt at changed_idx must remain empty.
            self.is_frame() && self.parent_level > 1 ==> {
                let pa = self.frame.unwrap().mapped_pa;
                let sub_level = (self.parent_level - 1) as PagingLevel;
                forall |j: usize| 0 < j < NR_ENTRIES ==> {
                    let sub_idx = #[trigger] frame_to_index((pa + j * page_size(sub_level)) as usize);
                    sub_idx != changed_idx || r1.slot_owners[changed_idx].paths_in_pt.is_empty()
                }
            },
        ensures
            self.metaregion_sound(r1),
    {
        if self.meta_slot_paddr() is Some {
            let eidx = frame_to_index(self.meta_slot_paddr().unwrap());
            // Bridge `rc > 0` from r0 to r1: at `eidx == changed_idx` inner_perms
            // are identical; elsewhere the entire slot_owner is identical.
            if self.is_frame() {
                if eidx == changed_idx {
                    assert(r1.slot_owners[eidx].inner_perms
                        == r0.slot_owners[eidx].inner_perms);
                } else {
                    assert(r1.slot_owners[eidx] == r0.slot_owners[eidx]);
                }
                // Sub-page `rc > 0` for huge frames: same reasoning, per j.
                if self.parent_level > 1 {
                    let pa = self.frame.unwrap().mapped_pa;
                    let nr_pages = page_size(self.parent_level) / PAGE_SIZE;
                    let self_idx = frame_to_index(self.meta_slot_paddr().unwrap());
                    assert forall |j: usize|
                        #![trigger frame_to_index((pa + j * PAGE_SIZE) as usize)]
                        0 < j < nr_pages implies {
                        let sub_idx = frame_to_index((pa + j * PAGE_SIZE) as usize);
                        &&& r1.slot_owners[sub_idx].inner_perms.ref_count.value() > 0
                        &&& r1.slot_owners[sub_idx].inner_perms.ref_count.value() != REF_COUNT_UNUSED
                        &&& r1.slots.contains_key(sub_idx)
                    } by {
                        let sub_idx = frame_to_index((pa + j * PAGE_SIZE) as usize);
                        // These come from self.metaregion_sound(r0)'s frame arm (since
                        // self.is_frame() && self.parent_level > 1) which includes
                        // frame_sub_pages_valid(r0).
                        assert(r0.slots.contains_key(sub_idx));
                        assert(r0.slot_owners[sub_idx].inner_perms.ref_count.value() > 0);
                        assert(r0.slot_owners[sub_idx].inner_perms.ref_count.value()
                            != REF_COUNT_UNUSED);
                        // Either only paths_in_pt changed (inner_perms equal) or the
                        // whole slot owner is unchanged — ref_count carries over either way.
                        if sub_idx == changed_idx {
                            assert(r1.slot_owners[sub_idx].inner_perms
                                == r0.slot_owners[sub_idx].inner_perms);
                        } else {
                            assert(r1.slot_owners[sub_idx] == r0.slot_owners[sub_idx]);
                        }
                    }
                }
            }
        }
    }
553
    /// Two entries with the same physical address whose `paths_in_pt` matches their
    /// respective paths must have the same path.
    pub proof fn same_paddr_implies_same_path(self, other: Self, regions: MetaRegionOwners)
        requires
            self.meta_slot_paddr() is Some,
            self.meta_slot_paddr() == other.meta_slot_paddr(),
            regions.slot_owners[
                frame_to_index(self.meta_slot_paddr().unwrap())
            ].paths_in_pt == set![self.path],
            regions.slot_owners[
                frame_to_index(self.meta_slot_paddr().unwrap())
            ].paths_in_pt == set![other.path],
        ensures
            self.path == other.path,
    {
        // The same slot's path set equals both singletons, so `other.path` is a
        // member of `set![self.path]`, forcing the two paths to coincide.
        assert(set![self.path].contains(other.path));
    }
571
    /// `metaregion_sound` is preserved when only `ref_count.value()` changes at this entry's slot
    /// and `slots` is unchanged. The new ref count must remain in use and positive;
    /// all other slot owners are unchanged, which keeps sub-page validity for huge frames.
    pub proof fn metaregion_sound_rc_value_changed(self, r0: MetaRegionOwners, r1: MetaRegionOwners)
        requires
            self.inv(),
            r0.inv(),
            self.metaregion_sound(r0),
            self.meta_slot_paddr() is Some,
            r0.slots == r1.slots,
            ({
                let idx = frame_to_index(self.meta_slot_paddr().unwrap());
                &&& r1.slot_owners[idx].inner_perms.ref_count.id()
                    == r0.slot_owners[idx].inner_perms.ref_count.id()
                &&& r1.slot_owners[idx].inner_perms.ref_count.value()
                    != crate::specs::mm::frame::meta_owners::REF_COUNT_UNUSED
                &&& r1.slot_owners[idx].inner_perms.ref_count.value() > 0
                &&& r1.slot_owners[idx].inner_perms.storage
                    == r0.slot_owners[idx].inner_perms.storage
                &&& r1.slot_owners[idx].inner_perms.vtable_ptr
                    == r0.slot_owners[idx].inner_perms.vtable_ptr
                &&& r1.slot_owners[idx].inner_perms.in_list
                    == r0.slot_owners[idx].inner_perms.in_list
                &&& r1.slot_owners[idx].self_addr == r0.slot_owners[idx].self_addr
                &&& r1.slot_owners[idx].raw_count == r0.slot_owners[idx].raw_count
                &&& r1.slot_owners[idx].paths_in_pt == r0.slot_owners[idx].paths_in_pt
            }),
            // All other slot_owners unchanged: preserves sub-page validity for huge frames.
            forall |i: usize| #![trigger r1.slot_owners[i]]
                i != frame_to_index(self.meta_slot_paddr().unwrap())
                    && r0.slot_owners.contains_key(i)
                    ==> r0.slot_owners[i] == r1.slot_owners[i],
        ensures
            self.metaregion_sound(r1),
    {
        // Only huge-frame sub-page validity needs an explicit argument: each
        // sub-page index differs from the frame's own (changed) index, so its
        // slot owner carries over from r0 unchanged.
        if self.is_frame() && self.parent_level > 1 {
            let pa = self.frame.unwrap().mapped_pa;
            let nr_pages = page_size(self.parent_level) / PAGE_SIZE;
            let self_idx = frame_to_index(self.meta_slot_paddr().unwrap());
            assert forall |j: usize| #![trigger frame_to_index((pa + j * PAGE_SIZE) as usize)]
                0 < j < nr_pages implies {
                let sub_idx = frame_to_index((pa + j * PAGE_SIZE) as usize);
                &&& r1.slots.contains_key(sub_idx)
                &&& r1.slot_owners[sub_idx].inner_perms.ref_count.value() != REF_COUNT_UNUSED
            } by {
                let sub_idx = frame_to_index((pa + j * PAGE_SIZE) as usize);
                // From frame_sub_pages_valid(r0):
                assert(r0.slots.contains_key(sub_idx));
                assert(r0.slot_owners[sub_idx].inner_perms.ref_count.value() != REF_COUNT_UNUSED);
                // Cast safety for pa + j*PAGE_SIZE: bounded by pa + page_size(parent_level).
                let pa_plus_int: int = pa as int + (j as int) * (PAGE_SIZE as int);
                crate::specs::mm::page_table::cursor::page_size_lemmas::
                    lemma_page_size_ge_page_size(self.parent_level);
                assert((j as int) * (PAGE_SIZE as int) < (nr_pages as int) * (PAGE_SIZE as int)) by {
                    vstd::arithmetic::mul::lemma_mul_strict_inequality(
                        j as int, nr_pages as int, PAGE_SIZE as int);
                };
                crate::specs::mm::page_table::cursor::page_size_lemmas::
                    lemma_page_size_div_mul_eq(self.parent_level);
                assert(pa_plus_int < MAX_PADDR);
                // sub_idx = (pa + j*PAGE_SIZE) / PAGE_SIZE = pa/PAGE_SIZE + j (since pa % PAGE_SIZE == 0).
                vstd::arithmetic::div_mod::lemma_div_multiples_vanish_quotient(
                    j as int, pa as int, PAGE_SIZE as int);
                assert(self_idx as int == pa as int / PAGE_SIZE as int);
                assert(sub_idx != self_idx);
                // Unchanged slot owner transfers the validity facts to r1.
                assert(r0.slot_owners.contains_key(sub_idx));
                assert(r0.slot_owners[sub_idx] == r1.slot_owners[sub_idx]);
            }
        }
    }
639
    /// Two nodes whose `paths_in_pt` matches their paths have different addresses
    /// if they have different paths.
    ///
    /// Proof by contradiction: if both nodes' meta permissions pointed at the
    /// same address, the two slots' singleton `paths_in_pt` sets would be the
    /// same set, forcing `self.path == other.path` — contradicting the
    /// precondition `self.path != other.path`.
    pub proof fn nodes_different_paths_different_addrs(
        self,
        other: Self,
        regions: MetaRegionOwners,
    )
        requires
            self.is_node(),
            other.is_node(),
            // Each node's meta slot records exactly its own tree path (a singleton set).
            self.meta_slot_paddr() is Some ==> regions.slot_owners[frame_to_index(self.meta_slot_paddr().unwrap())].paths_in_pt == set![self.path],
            other.meta_slot_paddr() is Some ==> regions.slot_owners[frame_to_index(other.meta_slot_paddr().unwrap())].paths_in_pt == set![other.path],
            self.path != other.path,
        ensures
            self.node.unwrap().meta_perm.addr() != other.node.unwrap().meta_perm.addr(),
    {
        let self_addr = self.node.unwrap().meta_perm.addr();
        let other_addr = other.node.unwrap().meta_perm.addr();
        let self_idx = frame_to_index(meta_to_frame(self_addr));
        let other_idx = frame_to_index(meta_to_frame(other_addr));

        if self_addr == other_addr {
            // Equal meta addresses give equal slot indices, so both
            // preconditions talk about the same slot owner.
            assert(regions.slot_owners[self_idx].paths_in_pt == set![self.path]);
            assert(regions.slot_owners[other_idx].paths_in_pt == set![other.path]);
            // `other.path` is in that set, whose only member is `self.path`.
            assert(set![self.path].contains(other.path));
            assert(self.path == other.path);
            assert(false); // Contradicts `self.path != other.path`.
        }
    }
669
    /// Two node entries with `metaregion_sound` under the same regions cannot share
    /// a meta slot paddr if their paths have different lengths.
    ///
    /// For nodes, `metaregion_sound` requires `paths_in_pt == set![path]` (singleton).
    /// Equal slot indices would force equal singleton sets, hence equal paths —
    /// contradicting the length difference.
    pub proof fn nodes_different_path_lengths_neq_slot(
        self,
        other: Self,
        regions: MetaRegionOwners,
    )
        requires
            self.is_node(),
            other.is_node(),
            self.metaregion_sound(regions),
            other.metaregion_sound(regions),
            self.path.len() != other.path.len(),
        ensures
            self.meta_slot_paddr_neq(other),
    {
        let self_idx = frame_to_index(self.meta_slot_paddr().unwrap());
        let other_idx = frame_to_index(other.meta_slot_paddr().unwrap());
        if self_idx == other_idx {
            // Same index ⇒ the two singleton path sets coincide, so the
            // paths are equal — but equal paths have equal lengths.
            assert(regions.slot_owners[self_idx].paths_in_pt == set![self.path]);
            assert(regions.slot_owners[other_idx].paths_in_pt == set![other.path]);
            assert(set![self.path].contains(other.path));
            assert(self.path == other.path);
            assert(false); // Contradicts `self.path.len() != other.path.len()`.
        }
    }
700}
701
impl<C: PageTableConfig> EntryOwner<C> {
    /// Structural invariant without `!in_scope`. Used by `Child::invariants`
    /// for entries that have been taken out of the tree (`in_scope == true`).
    ///
    /// The three ownership payloads (`node`, `frame`, `locked`) are mutually
    /// exclusive: each `is Some` arm requires the other two to be `None` and
    /// the entry to be non-absent. The tree path must be well-formed in all
    /// cases.
    pub open spec fn inv_base(self) -> bool {
        &&& self.node is Some ==> {
            &&& self.frame is None
            &&& self.locked is None
            &&& self.node.unwrap().inv()
            &&& !self.absent
            // A child node always lives one level below its parent entry.
            &&& self.parent_level == self.node.unwrap().level + 1
        }
        &&& self.frame is Some ==> {
            &&& self.node is None
            &&& self.locked is None
            &&& !self.absent
            // Architectural constraint: frames only exist at PT levels that the
            // ISA actually supports as leaves (4K, 2M, 1G on x86). `parent_level
            // == NR_LEVELS` would be a 512 GiB huge page, which no current arch
            // permits — and `Mapping::inv` would reject its page_size.
            &&& 1 <= self.parent_level < NR_LEVELS
            // Mapped range is page-aligned, sized by the level, aligned to its
            // own size, and fits entirely below MAX_PADDR.
            &&& self.frame.unwrap().mapped_pa % PAGE_SIZE == 0
            &&& self.frame.unwrap().mapped_pa < MAX_PADDR
            &&& self.frame.unwrap().size == page_size(self.parent_level)
            &&& self.frame.unwrap().mapped_pa % page_size(self.parent_level) == 0
            &&& self.frame.unwrap().mapped_pa + page_size(self.parent_level) <= MAX_PADDR
        }
        &&& self.locked is Some ==> {
            &&& self.frame is None
            &&& self.node is None
            &&& !self.absent
        }
        &&& self.path.inv()
    }

    /// Returns a copy of `self` with only the `in_scope` flag replaced.
    pub open spec fn set_in_scope(self, in_scope: bool) -> Self {
        EntryOwner { in_scope, ..self }
    }
}
740
impl<C: PageTableConfig> Inv for EntryOwner<C> {
    /// Full invariant: the structural invariant (`inv_base`) plus the
    /// requirement that the entry is still owned by the tree (`!in_scope`).
    open spec fn inv(self) -> bool {
        &&& !self.in_scope
        &&& self.inv_base()
    }
}
747
impl<C: PageTableConfig> View for EntryOwner<C> {
    type V = EntryView<C>;

    /// Abstracts an owner into its `EntryView`.
    ///
    /// The branch order mirrors the mutual exclusion in `inv_base`: a frame
    /// owner becomes a `Leaf`, a node owner an `Intermediate`, a locked
    /// subtree a `LockedSubtree`, and an entry with no payload is `Absent`.
    /// The virtual address and level are derived from the entry's tree path.
    #[verifier::external_body]
    open spec fn view(&self) -> <Self as View>::V {
        if let Some(frame) = self.frame {
            EntryView::Leaf {
                leaf: LeafPageTableEntryView {
                    map_va: vaddr(self.path) as int,
                    map_to_pa: frame.mapped_pa as int,
                    level: self.path.len() as u8,
                    prop: frame.prop,
                    phantom: PhantomData,
                },
            }
        } else if let Some(node) = self.node {
            EntryView::Intermediate {
                node: IntermediatePageTableEntryView {
                    map_va: vaddr(self.path) as int,
                    // The child node's frame paddr is recovered from its
                    // metadata-slot address.
                    map_to_pa: meta_to_frame(node.meta_perm.addr()) as int,
                    level: self.path.len() as u8,
                    phantom: PhantomData,
                },
            }
        } else if let Some(view) = self.locked {
            EntryView::LockedSubtree { views: view@ }
        } else {
            EntryView::Absent
        }
    }
}
783
impl<C: PageTableConfig> InvView for EntryOwner<C> {
    /// TODO(verification): this obligation is currently assumed with
    /// `admit()` — the proof that `view()` preserves the invariant is
    /// outstanding and must be discharged before this file is fully verified.
    proof fn view_preserves_inv(self) {
        admit()
    }
}
789
impl<'rcu, C: PageTableConfig> OwnerOf for Entry<'rcu, C> {
    type Owner = EntryOwner<C>;

    /// Well-formedness of a concrete `Entry` with respect to its ghost owner:
    /// the entry index is in range, the raw PTE agrees with the owner's state
    /// at the parent level, and the PTE's physical address is page-aligned
    /// and within the addressable physical range.
    open spec fn wf(self, owner: Self::Owner) -> bool {
        &&& self.idx < NR_ENTRIES
        &&& owner.match_pte(self.pte, owner.parent_level)
        &&& self.pte.paddr() % PAGE_SIZE == 0
        &&& self.pte.paddr() < MAX_PADDR
    }
}
800
801} // verus!