// ostd/specs/mm/page_table/node/entry_owners.rs

1use vstd::prelude::*;
2
3use vstd::cell;
4use vstd::simple_pptr::PointsTo;
5use vstd_extra::array_ptr;
6use vstd_extra::ghost_tree::*;
7use vstd_extra::ownership::*;
8
9use crate::mm::frame::meta::mapping::{frame_to_index, meta_addr, meta_to_frame};
10use crate::mm::frame::meta::MetaSlot;
11use crate::mm::frame::meta::REF_COUNT_UNUSED;
12use crate::mm::page_prop::PageProperty;
13use crate::mm::page_table::*;
14use crate::mm::{Paddr, PagingConstsTrait, PagingLevel, Vaddr};
15use crate::specs::arch::mm::{NR_ENTRIES, NR_LEVELS, PAGE_SIZE};
16use crate::specs::arch::paging_consts::PagingConsts;
17use crate::specs::arch::*;
18use crate::specs::mm::frame::meta_region_owners::MetaRegionOwners;
19use crate::specs::mm::page_table::node::entry_view::*;
20use crate::specs::mm::page_table::*;
21use core::marker::PhantomData;
22
23verus! {
24
/// Ghost ownership record for a leaf page-table entry that maps a frame.
pub tracked struct FrameEntryOwner {
    // Physical address the entry maps to; page-aligned per `EntryOwner::inv_base`.
    pub mapped_pa: usize,
    // Size of the mapping; equals `page_size(parent_level)` of the owning entry.
    pub size: usize,
    // Page property (permissions/caching) recorded for the mapping.
    pub prop: PageProperty,
    // Permission to the frame's `MetaSlot` in the metadata region.
    pub slot_perm: PointsTo<MetaSlot>,
}
31
/// Ghost owner of a single page-table entry. Exactly one of `node` / `frame` /
/// `locked` is `Some`, or `absent` is true (enforced by `inv_base`).
pub tracked struct EntryOwner<C: PageTableConfig> {
    // Set when the entry points to a child page-table node.
    pub node: Option<NodeOwner<C>>,
    // Set when the entry maps a frame (leaf mapping).
    pub frame: Option<FrameEntryOwner>,
    // Set when the entry stands for a locked subtree; holds ghost frame views.
    pub locked: Option<Ghost<Seq<FrameView<C>>>>,
    // True when the entry maps nothing.
    pub absent: bool,
    // Scope flag; `inv()` requires `!in_scope` while the `new_*_spec`
    // constructors set it to true — see the NOTE on `new_untracked_frame`.
    pub in_scope: bool,
    // Path of this entry in the ghost page-table tree.
    pub path: TreePath<NR_ENTRIES>,
    // Level of the parent node containing this entry.
    pub parent_level: PagingLevel,
}
41
impl<C: PageTableConfig> EntryOwner<C> {
    /// True iff this entry owns a child page-table node.
    pub open spec fn is_node(self) -> bool {
        self.node is Some
    }

    /// True iff this entry maps a frame (leaf mapping).
    pub open spec fn is_frame(self) -> bool {
        self.frame is Some
    }

    /// True iff this entry stands for a locked subtree.
    pub open spec fn is_locked(self) -> bool {
        self.locked is Some
    }

    /// True iff this entry maps nothing.
    pub open spec fn is_absent(self) -> bool {
        self.absent
    }

    /// Spec constructor: an absent entry at `path` under a node of `parent_level`.
    /// Note: sets `in_scope: true`, so the result satisfies `inv_base` but not `inv`.
    pub open spec fn new_absent_spec(path: TreePath<NR_ENTRIES>, parent_level: PagingLevel) -> Self {
        EntryOwner {
            node: None,
            frame: None,
            locked: None,
            absent: true,
            in_scope: true,
            path,
            parent_level,
        }
    }

    /// Spec constructor: a frame-mapping entry. The mapping size is derived from
    /// the parent level (`page_size(parent_level)`).
    pub open spec fn new_frame_spec(paddr: Paddr, path: TreePath<NR_ENTRIES>, parent_level: PagingLevel, prop: PageProperty, slot_perm: PointsTo<MetaSlot>) -> Self {
        EntryOwner {
            node: None,
            frame: Some(FrameEntryOwner { mapped_pa: paddr, size: page_size(parent_level), prop, slot_perm }),
            locked: None,
            absent: false,
            in_scope: true,
            path,
            parent_level,
        }
    }

    /// Spec constructor: an entry holding a child node. The parent level is one
    /// above the child node's own level.
    pub open spec fn new_node_spec(node: NodeOwner<C>, path: TreePath<NR_ENTRIES>) -> Self {
        EntryOwner {
            node: Some(node),
            frame: None,
            locked: None,
            absent: false,
            in_scope: true,
            path,
            parent_level: (node.level + 1) as PagingLevel,
        }
    }

    /// Axiom: materialize a tracked absent entry matching `new_absent_spec`.
    pub axiom fn new_absent(path: TreePath<NR_ENTRIES>, parent_level: PagingLevel) -> tracked Self
        returns Self::new_absent_spec(path, parent_level);

    /// Axiom: materialize a tracked frame entry matching `new_frame_spec`,
    /// consuming the slot permission.
    pub axiom fn new_frame(paddr: Paddr, path: TreePath<NR_ENTRIES>, parent_level: PagingLevel, prop: PageProperty, tracked slot_perm: PointsTo<MetaSlot>) -> tracked Self
        returns Self::new_frame_spec(paddr, path, parent_level, prop, slot_perm);

    /// Produces a slot permission for a frame address without requiring it from `regions.slots`.
    /// Used as a placeholder in cases (e.g. huge-page split) where the sub-frame slot perms
    /// are not yet fully tracked.  Replace with real perm threading when the split path is verified.
    ///
    /// NOTE(review): as an axiom this conjures a `PointsTo` out of thin air and can
    /// duplicate linear permissions; it is explicitly a stopgap per the comment above.
    pub axiom fn placeholder_slot_perm(paddr: Paddr, tracked regions: &MetaRegionOwners) -> (tracked res: PointsTo<MetaSlot>)
        requires
            regions.inv(),
            paddr % PAGE_SIZE == 0,
            paddr < MAX_PADDR,
        ensures
            res.addr() == meta_addr(frame_to_index(paddr)),
            res.is_init(),
            res.value().wf(regions.slot_owners[frame_to_index(paddr)]),
            regions.slot_owners[frame_to_index(paddr)].inner_perms.ref_count.value() != REF_COUNT_UNUSED,
            regions.slot_owners[frame_to_index(paddr)].path_if_in_pt is None;

    /// Axiom: materialize a tracked node entry matching `new_node_spec`.
    pub axiom fn new_node(node: NodeOwner<C>, path: TreePath<NR_ENTRIES>) -> tracked Self
        returns Self::new_node_spec(node, path);

    /// Creates a ghost entry owner for mapping an untracked (device memory) frame.
    /// Unlike `new_frame`, this does not consume a slot permission from the meta region,
    /// since device memory PAs are outside the tracked frame allocator.
    /// The actual mapping correctness is guaranteed by the caller's `unsafe` contract.
    ///
    /// The `requires` reflect properties guaranteed by `collect_largest_pages` postconditions,
    /// so this axiom is only ever called with values that satisfy them.
    ///
    /// NOTE(review): the `ensures` claim both `res.in_scope` and `res.inv()`, but
    /// `inv()` (see the `Inv` impl in this file) requires `!self.in_scope`. Taken
    /// together these conjuncts look unsatisfiable, which would make this axiom's
    /// postcondition inconsistent — confirm the intended polarity of `in_scope`.
    pub axiom fn new_untracked_frame(
        paddr: Paddr,
        parent_level: PagingLevel,
        prop: PageProperty,
    ) -> (tracked res: Self)
        requires
            paddr % PAGE_SIZE == 0,
            paddr < MAX_PADDR,
            1 <= parent_level,
            parent_level <= NR_LEVELS,
        ensures
            res.is_frame(),
            res.frame.unwrap().mapped_pa == paddr,
            res.frame.unwrap().prop == prop,
            res.frame.unwrap().size == page_size(parent_level),
            res.parent_level == parent_level,
            res.path.inv(),
            res.in_scope,
            res.inv(),
            crate::mm::page_table::Child::<C>::Frame(paddr, parent_level, prop).wf(res);

    /// Relates this ghost owner to a concrete PTE at `parent_level`:
    /// absent PTE <-> absent owner; present non-last PTE <-> node owner whose
    /// meta address maps back to the PTE's paddr; present last PTE <-> frame
    /// owner with matching paddr and property.
    pub open spec fn match_pte(self, pte: C::E, parent_level: PagingLevel) -> bool {
        &&& pte.paddr() % PAGE_SIZE == 0
        &&& pte.paddr() < MAX_PADDR
        &&& !pte.is_present() ==> {
            &&& self.is_absent()
            &&& parent_level > 1 ==> !pte.is_last(parent_level)
        }
        &&& pte.is_present() && !pte.is_last(parent_level) ==> {
            &&& self.is_node()
            &&& meta_to_frame(self.node.unwrap().meta_perm.addr()) == pte.paddr()
        }
        &&& pte.is_present() && pte.is_last(parent_level) ==> {
            &&& self.is_frame()
            &&& self.frame.unwrap().mapped_pa == pte.paddr()
            &&& self.frame.unwrap().prop == pte.prop()
        }
    }

    /// When owner is absent and pte is the absent PTE with valid paddr, match_pte holds.
    pub proof fn absent_match_pte(owner: Self, pte: C::E, parent_level: PagingLevel)
        requires
            owner.is_absent(),
            pte == C::E::new_absent_spec(),
            pte.paddr() % PAGE_SIZE == 0,
            pte.paddr() < MAX_PADDR,
        ensures
            owner.match_pte(pte, parent_level),
    {
        // new_properties gives that the absent PTE is not present / not last.
        C::E::new_properties();
        assert(!pte.is_present());
        if parent_level > 1 {
            assert(!pte.is_last(parent_level));
        }
        // The two remaining implications of match_pte hold vacuously: their
        // guards require pte.is_present(), contradicting the assert above.
        if pte.is_present() && !pte.is_last(parent_level) {
            assert(pte.is_present());
            assert(!pte.is_present());
        }
        if pte.is_present() && pte.is_last(parent_level) {
            assert(pte.is_present());
            assert(!pte.is_present());
        }
    }

    /// If a matched PTE is a last-level entry above level 1, the owner must be a
    /// frame owner with the PTE's paddr and property.
    pub proof fn last_pte_implies_frame_match(self, pte: C::E, parent_level: PagingLevel)
        requires
            self.inv(),
            self.match_pte(pte, parent_level),
            1 < parent_level,
            pte.is_last(parent_level),
        ensures
            self.is_frame(),
            self.frame.unwrap().mapped_pa == pte.paddr(),
            self.frame.unwrap().prop == pte.prop(),
    {
        // Rule out the absent case: match_pte forces !is_last for absent PTEs
        // at parent_level > 1, contradicting the precondition.
        if !pte.is_present() {
            assert(self.is_absent());
            assert(!pte.is_last(parent_level));
            assert(false);
        }
        assert(self.is_frame());
        assert(self.frame.unwrap().mapped_pa == pte.paddr());
        assert(self.frame.unwrap().prop == pte.prop());
    }

    /// When splitting a huge frame at `parent_level` (2 or 3), the `idx`-th child
    /// sub-frame's physical address is below MAX_PADDR, aligned to the child page
    /// size, fits entirely below MAX_PADDR, and is page-aligned.
    pub proof fn huge_frame_split_child_at(self, regions: MetaRegionOwners, idx: usize)
        requires
            self.inv(),
            self.is_frame(),
            regions.inv(),
            1 < self.parent_level < NR_LEVELS,
            idx < NR_ENTRIES,
        ensures
            self.frame.unwrap().mapped_pa + idx * page_size((self.parent_level - 1) as PagingLevel) < MAX_PADDR,
            ((self.frame.unwrap().mapped_pa + idx * page_size((self.parent_level - 1) as PagingLevel)) as Paddr)
                % page_size((self.parent_level - 1) as PagingLevel) == 0,
            ((self.frame.unwrap().mapped_pa + idx * page_size((self.parent_level - 1) as PagingLevel)) as Paddr)
                + page_size((self.parent_level - 1) as PagingLevel) <= MAX_PADDR,
            ((self.frame.unwrap().mapped_pa + idx * page_size((self.parent_level - 1) as PagingLevel)) as Paddr) % PAGE_SIZE == 0,
    {
        let pa = self.frame.unwrap().mapped_pa;
        let child_pa = (pa + idx * page_size((self.parent_level - 1) as PagingLevel)) as Paddr;
        // 1 < parent_level < NR_LEVELS pins the level to 2 or 3 on this arch.
        assert(self.parent_level == 2 || self.parent_level == 3);
        assert(NR_ENTRIES == 512) by {
            crate::specs::arch::paging_consts::lemma_nr_subpage_per_huge_eq_nr_entries();
        };
        assert(crate::mm::nr_subpage_per_huge::<PagingConsts>() == 512usize) by {
            crate::specs::arch::paging_consts::lemma_nr_subpage_per_huge_eq_nr_entries();
        };
        // Establish the concrete page sizes: 4 KiB, 2 MiB, 1 GiB.
        vstd_extra::external::ilog2::lemma_usize_ilog2_to32();
        crate::specs::mm::page_table::cursor::page_size_lemmas::lemma_page_size_spec_level1();
        assert(512usize.ilog2() == 9);
        assert(crate::mm::nr_subpage_per_huge::<PagingConsts>().ilog2() == 512usize.ilog2());
        vstd::arithmetic::power2::lemma2_to64();
        if self.parent_level == 2 {
            assert(page_size_spec(1) == 4096);
            assert(page_size_spec(2) == (PAGE_SIZE * pow2((512usize.ilog2() * 1usize) as nat)) as usize);
            assert(page_size_spec(2) == (4096 * pow2(9)) as usize);
            assert(page_size_spec(2) == 2097152);
            // inv_base gives alignment of pa to the parent page size.
            assert(pa % page_size(2) == 0);
            crate::specs::mm::page_table::cursor::page_size_lemmas::lemma_page_size_divides(1, 2);
            assert(child_pa % page_size(1) == 0);
            assert(child_pa + page_size(1) <= MAX_PADDR) by {
                assert(idx < 512);
                assert(idx * 4096 + 4096 <= 2097152);
                assert(child_pa + page_size(1) <= pa + page_size(2));
            };
        } else {
            assert(self.parent_level == 3);
            assert(page_size_spec(2) == (PAGE_SIZE * pow2((512usize.ilog2() * 1usize) as nat)) as usize);
            assert(page_size_spec(2) == (4096 * pow2(9)) as usize);
            assert(page_size_spec(2) == 2097152);
            assert(page_size_spec(3) == (PAGE_SIZE * pow2((512usize.ilog2() * 2usize) as nat)) as usize);
            assert(page_size_spec(3) == (4096 * pow2(18)) as usize);
            assert(page_size_spec(3) == 1073741824);
            assert(pa % page_size(3) == 0);
            assert(pa % PAGE_SIZE == 0);
            crate::specs::mm::page_table::cursor::page_size_lemmas::lemma_va_align_page_size(pa, 2);
            assert(child_pa == pa + idx * page_size(2));
            // child_pa = pa + idx * page_size(2) is a multiple of page_size(2):
            // the multiple term is 0 mod page_size(2), and so is pa (mod-noop add).
            vstd::arithmetic::div_mod::lemma_mod_multiples_basic(idx as int, page_size(2) as int);
            vstd::arithmetic::div_mod::lemma_add_mod_noop(
                pa as int,
                (idx * page_size(2)) as int,
                page_size(2) as int,
            );
            assert(child_pa % page_size(2) == 0);
            assert(child_pa + page_size(2) <= MAX_PADDR) by {
                assert(idx < 512);
                assert(idx * 2097152 + 2097152 <= 1073741824);
                assert(child_pa + page_size(2) <= pa + page_size(3));
            };
        }
        assert(child_pa < MAX_PADDR);
        assert(child_pa % PAGE_SIZE == 0);
    }

    /// Expected raw reference count on the meta slot: 0 while `in_scope`, 1 otherwise.
    pub open spec fn expected_raw_count(self) -> usize {
        if self.in_scope {
            0
        } else {
            1
        }
    }

    /// Relates this entry to the meta region: for node/frame entries, the slot
    /// owner at the entry's frame index is in use, well-formed against the held
    /// permission, and (if recorded) carries this entry's tree path.
    /// Absent/locked entries impose no constraints.
    pub open spec fn relate_region(self, regions: MetaRegionOwners) -> bool {
        if self.is_node() {
            let idx = frame_to_index(self.meta_slot_paddr().unwrap());
            &&& regions.slot_owners[idx].inner_perms.ref_count.value() != REF_COUNT_UNUSED
            &&& regions.slot_owners[idx].raw_count == self.expected_raw_count()
            &&& regions.slot_owners[idx].self_addr == self.node.unwrap().meta_perm.addr()
            &&& self.node.unwrap().meta_perm.points_to.value().wf(regions.slot_owners[idx])
            &&& regions.slot_owners[idx].path_if_in_pt is Some ==>
                regions.slot_owners[idx].path_if_in_pt.unwrap() == self.path
        } else if self.is_frame() {
            let idx = frame_to_index(self.meta_slot_paddr().unwrap());
            &&& self.frame.unwrap().slot_perm.addr() == meta_addr(idx)
            &&& self.frame.unwrap().slot_perm.is_init()
            &&& self.frame.unwrap().slot_perm.value().wf(regions.slot_owners[idx])
            &&& regions.slot_owners[idx].inner_perms.ref_count.value() != REF_COUNT_UNUSED
            &&& regions.slot_owners[idx].path_if_in_pt is Some ==>
                regions.slot_owners[idx].path_if_in_pt.unwrap() == self.path
        } else {
            true
        }
    }

    /// PointsTo uniqueness: if meta slot `free_idx` is in the free pool (`regions.slots`),
    /// no active page table entry can own a PointsTo at the same slot address.
    /// Justified by Verus's linear ownership of `PointsTo<MetaSlot>`:
    /// the slot's PointsTo is either in `regions.slots` OR held by an active entry, never both.
    pub axiom fn active_entry_not_in_free_pool(
        entry: Self,
        regions: MetaRegionOwners,
        free_idx: usize,
    )
        requires
            regions.inv(),
            entry.inv(),
            entry.relate_region(regions),
            regions.slots.contains_key(free_idx),
            entry.meta_slot_paddr() is Some,
        ensures
            frame_to_index(entry.meta_slot_paddr().unwrap()) != free_idx;

    /// Axiom: extract a tracked copy of this entry's tree path.
    pub axiom fn get_path(self) -> tracked TreePath<NR_ENTRIES>
        returns self.path;

    /// The physical address whose meta slot this entry refers to:
    /// for nodes, derived from the node's meta address; for frames, the mapped
    /// paddr; `None` for absent/locked entries.
    pub open spec fn meta_slot_paddr(self) -> Option<Paddr> {
        if self.is_node() {
            Some(meta_to_frame(self.node.unwrap().meta_perm.addr()))
        } else if self.is_frame() {
            Some(self.frame.unwrap().mapped_pa)
        } else {
            None
        }
    }

    /// When both entries refer to a meta slot, their paddrs differ.
    pub open spec fn meta_slot_paddr_neq(self, other: Self) -> bool {
        self.meta_slot_paddr() is Some ==>
        other.meta_slot_paddr() is Some ==>
        self.meta_slot_paddr().unwrap() != other.meta_slot_paddr().unwrap()
    }

    /// `relate_region` only uses `regions.slot_owners`, not `regions.slots`.
    /// So if two `MetaRegionOwners` have the same `slot_owners`, `relate_region` transfers.
    pub proof fn relate_region_slot_owners_only(self, r0: MetaRegionOwners, r1: MetaRegionOwners)
        requires
            self.relate_region(r0),
            r0.slot_owners == r1.slot_owners,
        ensures
            self.relate_region(r1),
    {
        // relate_region is an open spec fn referencing only r.slot_owners[idx],
        // so equality of slot_owners makes the two predicates equivalent.
    }

    /// If `relate_region(r0)` holds and `r1` differs from `r0` only at one slot index
    /// that this entry does not reference, then `relate_region(r1)` also holds.
    pub proof fn relate_region_one_slot_changed(self, r0: MetaRegionOwners, r1: MetaRegionOwners, changed_idx: usize)
        requires
            self.relate_region(r0),
            forall |i: usize| #![trigger r1.slot_owners[i]]
                i != changed_idx ==> r0.slot_owners[i] == r1.slot_owners[i],
            r0.slot_owners.dom() =~= r1.slot_owners.dom(),
            self.meta_slot_paddr() is Some ==>
                frame_to_index(self.meta_slot_paddr().unwrap()) != changed_idx,
        ensures
            self.relate_region(r1),
    {
        // relate_region only reads slot_owners[frame_to_index(self.meta_slot_paddr().unwrap())]
        // which is unchanged since frame_to_index != changed_idx.
    }

    /// Under `relate_region` + `path_if_in_pt is Some`, two entries with the same physical
    /// address must have the same path. Equivalently, different paths ↔ different paddrs.
    /// This is the fundamental paddr-uniqueness invariant: `path_if_in_pt` encodes the
    /// unique path for each physical address in the page table.
    pub proof fn same_paddr_implies_same_path(self, other: Self, regions: MetaRegionOwners)
        requires
            self.meta_slot_paddr() is Some,
            self.meta_slot_paddr() == other.meta_slot_paddr(),
            self.relate_region(regions),
            other.relate_region(regions),
            regions.slot_owners[
                frame_to_index(self.meta_slot_paddr().unwrap())
            ].path_if_in_pt is Some,
        ensures
            self.path == other.path,
    {
        let pa = self.meta_slot_paddr().unwrap();
        let idx = frame_to_index(pa);
        // relate_region for both self and other: path_if_in_pt is Some => path_if_in_pt == self.path
        // The precondition gives path_if_in_pt is Some, so the conditional fires.
        assert(regions.slot_owners[idx].path_if_in_pt.unwrap() == self.path);
        assert(regions.slot_owners[idx].path_if_in_pt.unwrap() == other.path);
    }

    /// `relate_region` is preserved when only `ref_count.value()` changes at this entry's slot.
    pub proof fn relate_region_rc_value_changed(self, r0: MetaRegionOwners, r1: MetaRegionOwners)
        requires
            self.relate_region(r0),
            self.meta_slot_paddr() is Some,
            ({
                let idx = frame_to_index(self.meta_slot_paddr().unwrap());
                // Every slot-owner component relate_region depends on is pinned
                // equal between r0 and r1; only the ref_count value may differ,
                // and it stays != REF_COUNT_UNUSED.
                &&& r1.slot_owners[idx].inner_perms.ref_count.id()
                    == r0.slot_owners[idx].inner_perms.ref_count.id()
                &&& r1.slot_owners[idx].inner_perms.ref_count.value()
                    != crate::specs::mm::frame::meta_owners::REF_COUNT_UNUSED
                &&& r1.slot_owners[idx].inner_perms.storage
                    == r0.slot_owners[idx].inner_perms.storage
                &&& r1.slot_owners[idx].inner_perms.vtable_ptr
                    == r0.slot_owners[idx].inner_perms.vtable_ptr
                &&& r1.slot_owners[idx].inner_perms.in_list
                    == r0.slot_owners[idx].inner_perms.in_list
                &&& r1.slot_owners[idx].path_if_in_pt == r0.slot_owners[idx].path_if_in_pt
                &&& r1.slot_owners[idx].self_addr == r0.slot_owners[idx].self_addr
                &&& r1.slot_owners[idx].raw_count == r0.slot_owners[idx].raw_count
            }),
        ensures
            self.relate_region(r1),
    {
    }

    /// Two nodes satisfying relate_region with the same regions have different addresses
    /// if and only if they have different paths.
    pub proof fn nodes_different_paths_different_addrs(
        self,
        other: Self,
        regions: MetaRegionOwners,
    )
        requires
            self.is_node(),
            other.is_node(),
            self.relate_region(regions),
            self.meta_slot_paddr() is Some ==> regions.slot_owners[frame_to_index(self.meta_slot_paddr().unwrap())].path_if_in_pt is Some,
            other.meta_slot_paddr() is Some ==> regions.slot_owners[frame_to_index(other.meta_slot_paddr().unwrap())].path_if_in_pt is Some,
            other.relate_region(regions),
            self.path != other.path,
        ensures
            self.node.unwrap().meta_perm.addr() != other.node.unwrap().meta_perm.addr(),
    {
        let self_addr = self.node.unwrap().meta_perm.addr();
        let other_addr = other.node.unwrap().meta_perm.addr();
        let self_idx = frame_to_index(meta_to_frame(self_addr));
        let other_idx = frame_to_index(meta_to_frame(other_addr));

        // Proof by contradiction: equal addresses force equal recorded paths.
        if self_addr == other_addr {
            assert(regions.slot_owners[self_idx].path_if_in_pt == Some(self.path));
            assert(regions.slot_owners[other_idx].path_if_in_pt == Some(other.path));
            assert(self.path == other.path);
            assert(false); // Contradiction
        }
    }
}
461
impl<C: PageTableConfig> EntryOwner<C> {
    /// Structural invariant without `!in_scope`. Used by `Child::invariants`
    /// for entries that have been taken out of the tree (`in_scope == true`).
    ///
    /// Ensures mutual exclusion of the `node` / `frame` / `locked` variants,
    /// frame alignment/bounds, and validity of the tree path.
    pub open spec fn inv_base(self) -> bool {
        &&& self.node is Some ==> {
            &&& self.frame is None
            &&& self.locked is None
            &&& self.node.unwrap().inv()
            &&& !self.absent
        }
        &&& self.frame is Some ==> {
            &&& self.node is None
            &&& self.locked is None
            &&& !self.absent
            // Frame is page-aligned, aligned to its own level's page size,
            // and lies entirely within the physical address space.
            &&& self.frame.unwrap().mapped_pa % PAGE_SIZE == 0
            &&& self.frame.unwrap().mapped_pa < MAX_PADDR
            &&& self.frame.unwrap().size == page_size(self.parent_level)
            &&& self.frame.unwrap().mapped_pa % page_size(self.parent_level) == 0
            &&& self.frame.unwrap().mapped_pa + page_size(self.parent_level) <= MAX_PADDR
        }
        &&& self.locked is Some ==> {
            &&& self.frame is None
            &&& self.node is None
            &&& !self.absent
        }
        &&& self.path.inv()
    }
}
490
impl<C: PageTableConfig> Inv for EntryOwner<C> {
    /// Full invariant: structural invariant plus the entry being in the tree
    /// (`!in_scope`). Entries taken out of the tree satisfy only `inv_base`.
    open spec fn inv(self) -> bool {
        &&& !self.in_scope
        &&& self.inv_base()
    }
}
497
impl<C: PageTableConfig> View for EntryOwner<C> {
    type V = EntryView<C>;

    /// Abstracts the tracked owner into its ghost `EntryView`:
    /// frame -> `Leaf`, node -> `Intermediate`, locked -> `LockedSubtree`,
    /// otherwise `Absent`. The checks mirror the mutual exclusion in `inv_base`.
    #[verifier::external_body]
    open spec fn view(&self) -> <Self as View>::V {
        if let Some(frame) = self.frame {
            EntryView::Leaf {
                leaf: LeafPageTableEntryView {
                    // Virtual address is recovered from the tree path.
                    map_va: vaddr(self.path) as int,
                    map_to_pa: frame.mapped_pa as int,
                    // Level encoded as the path depth.
                    level: self.path.len() as u8,
                    prop: frame.prop,
                    phantom: PhantomData,
                },
            }
        } else if let Some(node) = self.node {
            EntryView::Intermediate {
                node: IntermediatePageTableEntryView {
                    map_va: vaddr(self.path) as int,
                    // Child node's frame paddr, derived from its meta address.
                    map_to_pa: meta_to_frame(node.meta_perm.addr()) as int,
                    level: self.path.len() as u8,
                    phantom: PhantomData,
                },
            }
        } else if let Some(view) = self.locked {
            EntryView::LockedSubtree { views: view@ }
        } else {
            EntryView::Absent
        }
    }
}
533
impl<C: PageTableConfig> InvView for EntryOwner<C> {
    // NOTE(review): proof hole — `view_preserves_inv` is discharged with
    // `admit()`, so the claim that `view()` preserves the invariant is
    // currently assumed, not verified.
    proof fn view_preserves_inv(self) {
        admit()
    }
}
539
impl<'rcu, C: PageTableConfig> OwnerOf for Entry<'rcu, C> {
    type Owner = EntryOwner<C>;

    /// Well-formedness of a concrete `Entry` against its ghost owner:
    /// the slot index is in range, the stored PTE matches the owner at the
    /// owner's parent level, and the PTE's paddr is page-aligned and in bounds.
    open spec fn wf(self, owner: Self::Owner) -> bool {
        &&& self.idx < NR_ENTRIES
        &&& owner.match_pte(self.pte, owner.parent_level)
        &&& self.pte.paddr() % PAGE_SIZE == 0
        &&& self.pte.paddr() < MAX_PADDR
    }
}
550
551} // verus!