ostd/specs/mm/page_table/
mod.rs

1#![allow(hidden_glob_reexports)]
2
3pub mod cursor;
4pub mod node;
5mod owners;
6mod view;
7
8pub use cursor::*;
9pub use node::*;
10pub use owners::*;
11pub use view::*;
12
13use vstd::arithmetic::power2::pow2;
14use vstd::prelude::*;
15use vstd_extra::arithmetic::*;
16use vstd_extra::ghost_tree::TreePath;
17use vstd_extra::ownership::*;
18
19use crate::mm::page_table::page_size;
20use crate::mm::page_table::PageTableConfig;
21use crate::mm::{PagingLevel, Vaddr};
22use crate::specs::arch::mm::{NR_ENTRIES, NR_LEVELS, PAGE_SIZE};
23
24use align_ext::AlignExt;
25
26verus! {
27
/// An abstract representation of a virtual address as a sequence of indices, representing the
/// values of the bit-fields that index into each level of the page table.
/// `offset` is the lowest 12 bits (the offset into a 4096 byte page)
/// `index[0]` is the next 9 bits (index into the 512 entries of the first level page table)
/// and so on up to index[NR_LEVELS-1].
pub struct AbstractVaddr {
    // Byte offset within a page; `inv()` constrains it to [0, PAGE_SIZE).
    pub offset: int,
    // Per-level page-table indices, keyed 0 (lowest level) .. NR_LEVELS-1 (root);
    // `inv()` requires each key in that range to be present and in [0, NR_ENTRIES).
    pub index: Map<int, int>,
}
37
impl Inv for AbstractVaddr {
    /// Well-formedness invariant: the offset fits within one page, and every level
    /// 0..NR_LEVELS has an index entry in [0, NR_ENTRIES).
    open spec fn inv(self) -> bool {
        &&& 0 <= self.offset < PAGE_SIZE
        // The explicit trigger on `contains_key` keeps SMT quantifier instantiation predictable.
        &&& forall|i: int|
            #![trigger self.index.contains_key(i)]
            0 <= i < NR_LEVELS ==> {
                &&& self.index.contains_key(i)
                &&& 0 <= self.index[i]
                &&& self.index[i] < NR_ENTRIES
            }
    }
}
50
51impl AbstractVaddr {
    /// Extract the AbstractVaddr components from a concrete virtual address.
    /// - offset = lowest 12 bits
    /// - index[i] = bits (12 + 9*i) to (12 + 9*(i+1) - 1) for each level
    pub open spec fn from_vaddr(va: Vaddr) -> Self {
        AbstractVaddr {
            offset: (va % PAGE_SIZE) as int,
            // Shift right by (12 + 9*i) bits via division, then mask to the low 9 bits
            // via `% NR_ENTRIES`.
            index: Map::new(
                |i: int| 0 <= i < NR_LEVELS,
                |i: int| ((va / pow2((12 + 9 * i) as nat) as usize) % NR_ENTRIES) as int,
            ),
        }
    }
64
    /// `from_vaddr` always produces a well-formed `AbstractVaddr`: `va % PAGE_SIZE`
    /// bounds the offset and `% NR_ENTRIES` bounds every index.
    pub proof fn from_vaddr_wf(va: Vaddr)
        ensures
            AbstractVaddr::from_vaddr(va).inv(),
    {
        let abs = AbstractVaddr::from_vaddr(va);
        assert forall|i: int| #![trigger abs.index.contains_key(i)] 0 <= i < NR_LEVELS implies {
            &&& abs.index.contains_key(i)
            &&& 0 <= abs.index[i]
            &&& abs.index[i] < NR_ENTRIES
        } by {
            // index[i] = (va / 2^(12+9*i)) % NR_ENTRIES, which is in [0, NR_ENTRIES)
            // (unsigned euclidean mod; no extra lemmas needed here).
        };
    }
78
    /// Reconstruct the concrete virtual address from the AbstractVaddr components.
    /// va = offset + sum(index[i] * 2^(12 + 9*i)) for i in 0..NR_LEVELS
    ///
    /// The cast to `Vaddr` is lossless whenever `self.inv()` holds; see `to_vaddr_bounded`.
    pub open spec fn to_vaddr(self) -> Vaddr {
        (self.offset + self.to_vaddr_indices(0)) as Vaddr
    }
84
    /// Helper: sum of index[i] * 2^(12 + 9*i) for i in start..NR_LEVELS
    ///
    /// The `when` clause restricts the definition to `start <= NR_LEVELS`, which is
    /// exactly what makes the `decreases` measure well-founded.
    pub open spec fn to_vaddr_indices(self, start: int) -> int
        decreases NR_LEVELS - start,
        when start <= NR_LEVELS
    {
        if start >= NR_LEVELS {
            0
        } else {
            self.index[start] * pow2((12 + 9 * start) as nat) as int + self.to_vaddr_indices(
                start + 1,
            )
        }
    }
98
    /// reflect(self, va) holds when self is the abstract representation of va.
    /// Defined as literal equality with `from_vaddr(va)`, so it is trivially functional in `va`.
    pub open spec fn reflect(self, va: Vaddr) -> bool {
        self == Self::from_vaddr(va)
    }
103
    /// If self reflects va, then self.to_vaddr() == va and self == from_vaddr(va).
    /// The first ensures requires proving the round-trip property: from_vaddr(va).to_vaddr() == va.
    pub broadcast proof fn reflect_prop(self, va: Vaddr)
        requires
            self.inv(),
            self.reflect(va),
        ensures
            #[trigger] self.to_vaddr() == va,
            #[trigger] Self::from_vaddr(va) == self,
    {
        // self.reflect(va) means self == from_vaddr(va)
        // So self.to_vaddr() == from_vaddr(va).to_vaddr()
        // We need: from_vaddr(va).to_vaddr() == va (round-trip property)
        Self::from_vaddr_to_vaddr_roundtrip(va);
    }
119
    /// Round-trip property: extracting and reconstructing a VA gives back the original.
    pub proof fn from_vaddr_to_vaddr_roundtrip(va: Vaddr)
        ensures
            Self::from_vaddr(va).to_vaddr() == va,
    {
        // va = offset + sum(index[i] * 2^(12+9*i))
        // from_vaddr extracts: offset = va % 2^12, index[i] = (va / 2^(12+9*i)) % 2^9
        // to_vaddr reconstructs: offset + sum(index[i] * 2^(12+9*i))
        // This equals va by the uniqueness of positional representation in mixed radix.
        // TODO(review): proof is admitted — the mixed-radix decomposition argument
        // (div/mod recomposition) still needs to be discharged; until then this is a
        // trusted assumption on par with the axioms below.
        admit()
    }
131
    /// from_vaddr(va) reflects va (by definition of reflect).
    /// Also re-exports well-formedness so broadcast users get `inv()` for free.
    pub broadcast proof fn reflect_from_vaddr(va: Vaddr)
        ensures
            #[trigger] Self::from_vaddr(va).reflect(va),
            #[trigger] Self::from_vaddr(va).inv(),
    {
        Self::from_vaddr_wf(va);
    }
140
    /// If self.inv(), then self reflects self.to_vaddr().
    /// This is the inverse direction of `reflect_from_vaddr`, via the second round-trip lemma.
    pub broadcast proof fn reflect_to_vaddr(self)
        requires
            self.inv(),
        ensures
            #[trigger] self.reflect(self.to_vaddr()),
    {
        Self::to_vaddr_from_vaddr_roundtrip(self);
    }
150
    /// Inverse round-trip: to_vaddr then from_vaddr gives back the original AbstractVaddr.
    pub proof fn to_vaddr_from_vaddr_roundtrip(abs: Self)
        requires
            abs.inv(),
        ensures
            Self::from_vaddr(abs.to_vaddr()) == abs,
    {
        // Similar reasoning: the positional representation is unique
        // TODO(review): admitted, same mixed-radix uniqueness obligation as
        // `from_vaddr_to_vaddr_roundtrip`; both should be proved together.
        admit()
    }
161
    /// If two AbstractVaddrs reflect the same va, they are equal.
    /// No proof body is needed: `reflect` unfolds to equality with `from_vaddr(va)`,
    /// so both sides equal the same term.
    pub broadcast proof fn reflect_eq(self, other: Self, va: Vaddr)
        requires
            #[trigger] self.reflect(va),
            #[trigger] other.reflect(va),
        ensures
            self == other,
    {
    }
171
    /// Align down to a `page_size(level)` boundary, abstractly: zero the page offset and
    /// all indices strictly below position `level - 1` (indices at `level - 1` and above
    /// are kept). Defined only for `level >= 1` (the `when` clause); `align_down(0)` is
    /// left unspecified and its needed properties are axiomatized in
    /// `align_down_zero_properties`.
    pub open spec fn align_down(self, level: int) -> Self
        decreases level,
        when level >= 1
    {
        if level == 1 {
            // Base case: clear only the sub-page offset.
            AbstractVaddr { offset: 0, index: self.index }
        } else {
            // Inductive case: additionally zero the index at position level - 2.
            let tmp = self.align_down(level - 1);
            AbstractVaddr { index: tmp.index.insert(level - 2, 0), ..tmp }
        }
    }
183
    /// `align_down(level)` preserves well-formedness, and leaves the indices at
    /// positions `level - 1 .. NR_LEVELS - 2` unchanged (the ensures quantifies over
    /// `i` and speaks about position `i - 1`).
    pub proof fn align_down_inv(self, level: int)
        requires
            1 <= level < NR_LEVELS,
            self.inv(),
        ensures
            self.align_down(level).inv(),
            forall|i: int|
                level <= i < NR_LEVELS ==> #[trigger] self.index[i - 1] == self.align_down(
                    level,
                ).index[i - 1],
        decreases level,
    {
        if level == 1 {
            // Base case: align_down(1) only clears the offset, all indices untouched.
        } else {
            // Inductive case: the insert at position level - 2 does not affect i - 1 >= level - 1.
            let tmp = self.align_down(level - 1);
            self.align_down_inv(level - 1);
        }
    }
202
    /// Full shape characterization of `align_down(level)`: well-formed, offset zeroed,
    /// indices 0 .. level-2 zeroed, and indices level-1 and above preserved from `self`.
    pub proof fn align_down_shape(self, level: int)
        requires
            1 <= level <= NR_LEVELS,
            self.inv(),
        ensures
            self.align_down(level).inv(),
            self.align_down(level).offset == 0,
            forall|i: int| 0 <= i < level - 1 ==> #[trigger] self.align_down(level).index[i] == 0,
            forall|i: int|
                level - 1 <= i < NR_LEVELS ==> #[trigger] self.align_down(level).index[i]
                    == self.index[i],
        decreases level,
    {
        if level == 1 {
            // Base case: the zeroed range 0..0 is empty, the preserved range is everything.
            assert forall|i: int| 0 <= i < level - 1 implies #[trigger] self.align_down(
                level,
            ).index[i] == 0 by {};
            assert forall|i: int| level - 1 <= i < NR_LEVELS implies #[trigger] self.align_down(
                level,
            ).index[i] == self.index[i] by {};
        } else {
            // Inductive case: align_down(level) = align_down(level - 1) with index[level - 2]
            // zeroed; the induction hypothesis covers everything else.
            let tmp = self.align_down(level - 1);
            self.align_down_shape(level - 1);
            assert forall|i: int| 0 <= i < level - 1 implies #[trigger] self.align_down(
                level,
            ).index[i] == 0 by {};
            assert forall|i: int| level - 1 <= i < NR_LEVELS implies #[trigger] self.align_down(
                level,
            ).index[i] == self.index[i] by {
            };
        }
    }
235
    /// If every index in [from, to) is zero, those terms contribute nothing to the
    /// partial sum, so summation may start at `to` instead of `from`.
    pub proof fn to_vaddr_indices_drop_zero_range(self, from: int, to: int)
        requires
            self.inv(),
            0 <= from <= to <= NR_LEVELS,
            forall|i: int| from <= i < to ==> self.index[i] == 0,
        ensures
            self.to_vaddr_indices(from) == self.to_vaddr_indices(to),
        decreases to - from,
    {
        // Induction on the length of the zero range; each step drops one 0 * 2^k term.
        if from < to {
            self.to_vaddr_indices_drop_zero_range(from + 1, to);
        }
    }
249
    /// The partial sum from `start` only depends on indices at positions >= start:
    /// if two abstract vaddrs agree there, their partial sums agree.
    pub proof fn to_vaddr_indices_eq_if_indices_eq(self, other: Self, start: int)
        requires
            self.inv(),
            other.inv(),
            0 <= start <= NR_LEVELS,
            forall|i: int| start <= i < NR_LEVELS ==> self.index[i] == other.index[i],
        ensures
            self.to_vaddr_indices(start) == other.to_vaddr_indices(start),
        decreases NR_LEVELS - start,
    {
        // Straightforward induction over the remaining levels.
        if start < NR_LEVELS {
            self.to_vaddr_indices_eq_if_indices_eq(other, start + 1);
        }
    }
264
    /// If two AbstractVaddrs share the same indices at levels >= level-1 (i.e., index[level-1] and above),
    /// then aligning them down to `level` gives the same to_vaddr() result.
    /// This is because align_down(level) zeroes offset and indices 0 through level-2,
    /// so only indices level-1 and above affect the to_vaddr() result.
    pub proof fn align_down_to_vaddr_eq_if_upper_indices_eq(self, other: Self, level: int)
        requires
            1 <= level <= NR_LEVELS,
            self.inv(),
            other.inv(),
            // Indices at level-1 and above are equal
            forall|i: int| level - 1 <= i < NR_LEVELS ==> self.index[i] == other.index[i],
        ensures
            self.align_down(level).to_vaddr() == other.align_down(level).to_vaddr(),
        decreases level,
    {
        let lhs = self.align_down(level);
        let rhs = other.align_down(level);

        // Both aligned values have offset 0, zeroed low indices, and preserved high indices.
        self.align_down_shape(level);
        other.align_down_shape(level);

        // Drop the zeroed prefix on both sides, then compare the remaining equal tails.
        lhs.to_vaddr_indices_drop_zero_range(0, level - 1);
        rhs.to_vaddr_indices_drop_zero_range(0, level - 1);
        lhs.to_vaddr_indices_eq_if_indices_eq(rhs, level - 1);

    }
291
    /// Trusted bridge to the concrete world: abstract `align_down(level)` reflects the
    /// concrete `nat_align_down` of `to_vaddr()` to a `page_size(level)` boundary.
    /// NOTE(review): axiom — relies on page_size(level) == 2^(12 + 9*(level-1)); not
    /// checked here, so it should eventually be derived from the round-trip lemmas.
    pub axiom fn align_down_concrete(self, level: int)
        requires
            1 <= level <= NR_LEVELS,
        ensures
            self.align_down(level).reflect(
                nat_align_down(
                    self.to_vaddr() as nat,
                    page_size(level as PagingLevel) as nat,
                ) as Vaddr,
            ),
    ;
303
    /// `align_down(0)` is not recursively reachable by the spec (defined only for level >= 1),
    /// so we axiomatize its key properties: it preserves `inv()` and all index values.
    /// This is consistent with the intended semantics of level-0 "alignment" being a no-op
    /// on the index array (it would only zero a non-existent sub-page offset).
    /// Needed because `align_up(1)` expands to `align_down(0).next_index(1)`.
    pub axiom fn align_down_zero_properties(self)
        requires
            self.inv(),
        ensures
            self.align_down(0).inv(),
            forall|i: int|
                #![auto]
                0 <= i < NR_LEVELS ==> self.align_down(0).index[i] == self.index[i],
    ;
317
    /// Two virtual addresses in the same page_size(level+1) aligned block
    /// have the same from_vaddr().index[i] for all i >= level.
    ///
    /// page_size(level + 1) = 2^(12 + 9*level). Being in the same aligned block means
    /// va / 2^(12 + 9*level) is equal, so (va / 2^(12+9*i)) % 512 is equal for i >= level.
    /// NOTE(review): axiom — the div/mod argument above is sound but not machine-checked.
    pub axiom fn same_node_indices_match(
        va1: Vaddr,
        va2: Vaddr,
        node_start: Vaddr,
        level: PagingLevel,
    )
        requires
            1 <= level,
            level < NR_LEVELS,
            node_start <= va1,
            va1 < node_start + page_size((level + 1) as PagingLevel),
            node_start <= va2,
            va2 < node_start + page_size((level + 1) as PagingLevel),
            node_start as nat % page_size((level + 1) as PagingLevel) as nat == 0,
        ensures
            forall|i: int|
                #![auto]
                level as int <= i < NR_LEVELS ==> Self::from_vaddr(va1).index[i]
                    == Self::from_vaddr(va2).index[i],
    ;
343
    /// Align up to the next `page_size(level)` boundary, abstractly: align down one level
    /// lower, then step to the next index at `level` (carrying upward as needed).
    /// For `level == 1` this uses `align_down(0)`, whose properties come from the
    /// `align_down_zero_properties` axiom.
    pub open spec fn align_up(self, level: int) -> Self {
        let lower_aligned = self.align_down(level - 1);
        lower_aligned.next_index(level)
    }
348
    /// Trusted bridge: abstract `align_up(level)` reflects the concrete `nat_align_up`
    /// of `to_vaddr()` to a `page_size(level)` boundary (counterpart of
    /// `align_down_concrete`).
    pub axiom fn align_up_concrete(self, level: int)
        requires
            1 <= level <= NR_LEVELS,
        ensures
            self.align_up(level).reflect(
                nat_align_up(
                    self.to_vaddr() as nat,
                    page_size(level as PagingLevel) as nat,
                ) as Vaddr,
            ),
    ;
360
    /// Trusted arithmetic facts about the concrete alignment of `to_vaddr()`:
    /// align-up is exactly one page_size(level) above align-down, and align-up does not
    /// overflow usize.
    /// NOTE(review): as stated this assumes `to_vaddr()` is not already aligned (when it
    /// is, nat_align_up == nat_align_down); confirm the call sites only use it for
    /// unaligned addresses or that nat_align_up is defined to round strictly up.
    pub axiom fn align_diff(self, level: int)
        requires
            1 <= level <= NR_LEVELS,
        ensures
            nat_align_up(self.to_vaddr() as nat, page_size(level as PagingLevel) as nat)
                == nat_align_down(self.to_vaddr() as nat, page_size(level as PagingLevel) as nat)
                + page_size(level as PagingLevel),
            nat_align_up(self.to_vaddr() as nat, page_size(level as PagingLevel) as nat)
                < usize::MAX,
    ;
371
    /// The abstract successor at `level`: increment `index[level - 1]`, propagating a
    /// carry into higher levels whenever an index wraps past NR_ENTRIES - 1. A carry out
    /// of the root level (level == NR_LEVELS) simply wraps that index to 0, i.e. the
    /// whole address space wraps around. The page offset is never changed.
    pub open spec fn next_index(self, level: int) -> Self
        decreases NR_LEVELS - level,
        when 1 <= level <= NR_LEVELS
    {
        let index = self.index[level - 1];
        let next_index = index + 1;
        if next_index == NR_ENTRIES && level < NR_LEVELS {
            // Wrap this level to 0 and carry into the next higher level.
            let next_va = Self { offset: self.offset, index: self.index.insert(level - 1, 0) };
            next_va.next_index(level + 1)
        } else if next_index == NR_ENTRIES && level == NR_LEVELS {
            // Carry out of the root level: wrap with no further propagation.
            Self { offset: self.offset, index: self.index.insert(level - 1, 0) }
        } else {
            // No wrap: plain increment at this level.
            Self { offset: self.offset, index: self.index.insert(level - 1, next_index) }
        }
    }
387
    /// Characterizes the carry chain of `next_index(start_level)` as observed at `level`:
    /// if the successor's index at `level - 1` is 0, then the original index must have
    /// been NR_ENTRIES - 1 (it wrapped) and the carry continues at the next level;
    /// if it is non-zero, no wrap occurred there (the original index + 1 < NR_ENTRIES).
    pub open spec fn wrapped(self, start_level: int, level: int) -> bool
        decreases NR_LEVELS - level,
        when 1 <= start_level <= level <= NR_LEVELS
    {
        &&& self.next_index(start_level).index[level - 1] == 0 ==> {
            &&& self.index[level - 1] + 1 == NR_ENTRIES
            &&& if level < NR_LEVELS {
                self.wrapped(start_level, level + 1)
            } else {
                true
            }
        }
        &&& self.next_index(start_level).index[level - 1] != 0 ==> self.index[level - 1] + 1
            < NR_ENTRIES
    }
403
    /// Eliminator for `wrapped`: a zeroed successor index implies the original index
    /// was NR_ENTRIES - 1. Follows directly from unfolding `wrapped` one step.
    pub proof fn use_wrapped(self, start_level: int, level: int)
        requires
            1 <= start_level <= level < NR_LEVELS,
            self.wrapped(start_level, level),
            self.next_index(start_level).index[level - 1] == 0,
        ensures
            self.index[level - 1] + 1 == NR_ENTRIES,
    {
    }
413
    /// Eliminator for `wrapped`: a zeroed successor index means the carry chain
    /// continues, so `wrapped` also holds one level up. Direct from the definition.
    pub proof fn wrapped_unwrap(self, start_level: int, level: int)
        requires
            1 <= start_level <= level < NR_LEVELS,
            self.wrapped(start_level, level),
            self.next_index(start_level).index[level - 1] == 0,
        ensures
            self.wrapped(start_level, level + 1),
    {
    }
423
    /// After a carry at `start_level` (its index was NR_ENTRIES - 1 and is reset to 0),
    /// the `wrapped` predicate observed at any strictly higher `level` is unchanged:
    /// it holds for `self` iff it holds for the carried value at `start_level + 1`.
    pub proof fn wrapped_after_carry_equiv(self, start_level: int, level: int)
        requires
            self.inv(),
            1 <= start_level < level <= NR_LEVELS,
            self.index[start_level - 1] + 1 == NR_ENTRIES,
        ensures
            ({
                let next_va = Self {
                    offset: self.offset,
                    index: self.index.insert(start_level - 1, 0),
                };
                self.wrapped(start_level, level) == next_va.wrapped(start_level + 1, level)
            }),
        decreases NR_LEVELS - level,
    {
        let next_va = Self { offset: self.offset, index: self.index.insert(start_level - 1, 0) };
        // The insert only touches position start_level - 1; all positions `wrapped`
        // inspects at level > start_level are identical between self and next_va.
        assert forall|i: int| start_level <= i < NR_LEVELS implies next_va.index[i]
            == self.index[i] by {};
        if level < NR_LEVELS {
            // Induct upward through the remaining levels of the carry chain.
            self.wrapped_after_carry_equiv(start_level, level + 1);
        }
    }
446
    /// `next_index(start_level)` never modifies indices strictly below `start_level`:
    /// every `insert` it performs targets position `start_level - 1` or higher.
    pub proof fn next_index_preserves_lower_indices(self, start_level: int, lower_level: int)
        requires
            self.inv(),
            1 <= lower_level < start_level <= NR_LEVELS,
        ensures
            self.next_index(start_level).index[lower_level - 1] == self.index[lower_level - 1],
        decreases NR_LEVELS - start_level,
    {
        // Mirror the case split of `next_index` so the recursive call lines up.
        let index = self.index[start_level - 1];
        let next_index = index + 1;
        if next_index == NR_ENTRIES && start_level < NR_LEVELS {
            let next_va = Self {
                offset: self.offset,
                index: self.index.insert(start_level - 1, 0),
            };
            next_va.next_index_preserves_lower_indices(start_level + 1, lower_level);
        } else if next_index == NR_ENTRIES && start_level == NR_LEVELS {
            // Root wrap: a single insert at start_level - 1 leaves lower indices intact.
        }
        // Remaining case (no wrap): the single insert also targets start_level - 1.
    }
466
    /// Every well-formed AbstractVaddr satisfies its own wrap invariant:
    /// `wrapped(level, level)` holds, i.e. the carry chain of `next_index(level)` is
    /// fully characterized by the `wrapped` predicate starting at that same level.
    pub proof fn next_index_wrap_condition(self, level: int)
        requires
            self.inv(),
            1 <= level <= NR_LEVELS,
        ensures
            self.wrapped(level, level),
        decreases NR_LEVELS - level,
    {
        let index = self.index[level - 1];
        let next_index = index + 1;
        if next_index == NR_ENTRIES {
            if level < NR_LEVELS {
                // A carry occurs: recurse on the carried value, then transport `wrapped`
                // back across the carry and recover this level's (zeroed) index.
                let next_va = Self { offset: self.offset, index: self.index.insert(level - 1, 0) };
                next_va.next_index_wrap_condition(level + 1);
                self.wrapped_after_carry_equiv(level, level + 1);
                next_va.next_index_preserves_lower_indices(level + 1, level);
            }
        } else {
            // No carry: the successor index is next_index != 0, so only the second
            // conjunct of `wrapped` applies, and inv() gives the strict bound.
            assert(self.index.contains_key(level - 1));
        }
    }
488
    //
    // === Connection to TreePath and concrete Vaddr ===
    //
    /// Computes the concrete vaddr from the abstract representation.
    /// This matches the structure:
    ///   index[NR_LEVELS-1] * 2^39 + index[NR_LEVELS-2] * 2^30 + ... + index[0] * 2^12 + offset
    ///
    /// Same value as `to_vaddr()` under `inv()` (see `to_vaddr_is_compute_vaddr`), but
    /// phrased via `page_size` to line up with the TreePath lemmas below.
    pub open spec fn compute_vaddr(self) -> Vaddr {
        self.rec_compute_vaddr(0)
    }
498
    /// Helper for computing vaddr recursively from level i upward.
    /// Each term is index[i] * page_size(i + 1); page_size(i + 1) serves as the radix
    /// 2^(12 + 9*i) here — confirm against the `page_size` spec if that ever changes.
    pub open spec fn rec_compute_vaddr(self, i: int) -> Vaddr
        decreases NR_LEVELS - i,
        when 0 <= i <= NR_LEVELS
    {
        if i >= NR_LEVELS {
            // Base case: only the sub-page offset remains.
            self.offset as Vaddr
        } else {
            let shift = page_size((i + 1) as PagingLevel);
            (self.index[i] * shift + self.rec_compute_vaddr(i + 1)) as Vaddr
        }
    }
511
    /// Extracts a TreePath from this abstract vaddr, from the root down to the given level.
    /// The path has length (NR_LEVELS - level), containing indices for paging levels NR_LEVELS..level+1.
    /// - level=0: full path of length NR_LEVELS with indices for all levels
    /// - level=3: path of length 1 with just the root index
    ///
    /// Path index mapping:
    /// - path.index(0) = self.index[NR_LEVELS - 1]  (root level)
    /// - path.index(i) = self.index[NR_LEVELS - 1 - i]
    /// - path.index(NR_LEVELS - level - 1) = self.index[level]  (last entry)
    pub open spec fn to_path(self, level: int) -> TreePath<NR_ENTRIES>
        recommends
            0 <= level < NR_LEVELS,
    {
        // Build the raw index sequence root-first, then wrap it as a TreePath.
        TreePath(self.rec_to_path(NR_LEVELS - 1, level))
    }
527
    /// Builds the path sequence from abstract_level down to bottom_level (both inclusive).
    /// abstract_level and bottom_level refer to the index keys in self.index (0 = lowest level, NR_LEVELS-1 = root).
    /// Returns indices in order from highest level (first in seq) to lowest level (last in seq).
    pub open spec fn rec_to_path(self, abstract_level: int, bottom_level: int) -> Seq<usize>
        decreases abstract_level - bottom_level,
        when bottom_level <= abstract_level
    {
        if abstract_level < bottom_level {
            // Unreachable under the `when` clause; kept for totality.
            seq![]
        } else if abstract_level == bottom_level {
            // Base case: just this one level
            seq![self.index[abstract_level] as usize]
        } else {
            // Recursive case: place the current higher level first, then recurse downward.
            seq![self.index[abstract_level] as usize].add(
                self.rec_to_path(abstract_level - 1, bottom_level),
            )
        }
    }
547
    /// The vaddr of the path from this abstract vaddr equals the aligned vaddr at that level.
    ///
    /// Proof strategy: case-split on the four possible levels (this hard-codes
    /// NR_LEVELS == 4). In each case, expand `vaddr(path)` term by term via `rec_vaddr`
    /// and `vaddr_make` (radices checked `by (compute)`), expand
    /// `aligned.compute_vaddr()` via `rec_compute_vaddr`, and show both sides equal the
    /// same sum of index * radix products. `to_path_inv`, `to_path_len` and
    /// `to_path_index` are sibling lemmas defined elsewhere in this module.
    #[verifier::rlimit(400)]
    pub proof fn to_path_vaddr(self, level: int)
        requires
            self.inv(),
            0 <= level < NR_LEVELS,
        ensures
            vaddr(self.to_path(level)) == self.align_down(level + 1).compute_vaddr(),
    {
        self.to_path_inv(level);
        self.to_path_len(level);
        lemma_page_size_spec_level1();
        vstd::arithmetic::power2::lemma2_to64();
        vstd::arithmetic::power2::lemma2_to64_rest();
        crate::specs::arch::paging_consts::lemma_nr_subpage_per_huge_eq_nr_entries();
        vstd_extra::external::ilog2::lemma_usize_ilog2_to32();
        let path = self.to_path(level);
        if level == 3 {
            // Case level == 3: path is [index[3]] alone; only the 2^39 radix contributes.
            let aligned = self.align_down(4);
            self.align_down_shape(4);
            self.to_path_index(3, 0);
            path.index_satisfies_elem_inv(0);
            assert(vaddr(path) == path.index(0) * 0x80_0000_0000usize) by {
                assert(rec_vaddr(path, 0) == (vaddr_make::<NR_LEVELS>(0, path.index(0)) + rec_vaddr(
                    path,
                    1,
                )) as usize);
            };
            assert(aligned.rec_compute_vaddr(3) == self.index[3] * 0x80_0000_0000usize) by {
                assert(aligned.rec_compute_vaddr(3) == (aligned.index[3] * page_size(4)
                    + aligned.rec_compute_vaddr(4)) as Vaddr);
            };
            assert(aligned.rec_compute_vaddr(1) == self.index[3] * 0x80_0000_0000usize) by {
                assert(aligned.rec_compute_vaddr(1) == (aligned.index[1] * page_size(2)
                    + aligned.rec_compute_vaddr(2)) as Vaddr);
            };
            assert(aligned.compute_vaddr() == aligned.rec_compute_vaddr(0));
            assert(aligned.rec_compute_vaddr(0) == (aligned.index[0] * page_size(1)
                + aligned.rec_compute_vaddr(1)) as Vaddr);
            assert(vaddr(path) == aligned.compute_vaddr());
        } else if level == 2 {
            // Case level == 2: path is [index[3], index[2]]; radices 2^39 and 2^30.
            let aligned = self.align_down(3);
            self.align_down_shape(3);
            self.to_path_index(2, 0);
            self.to_path_index(2, 1);
            path.index_satisfies_elem_inv(0);
            path.index_satisfies_elem_inv(1);
            assert(vaddr(path) == path.index(0) * 0x80_0000_0000usize + path.index(1)
                * 0x4000_0000usize) by {
                assert(vaddr(path) == rec_vaddr(path, 0));
                assert(rec_vaddr(path, 1) == (vaddr_make::<NR_LEVELS>(1, path.index(1)) + rec_vaddr(
                    path,
                    2,
                )) as usize);
            };
            assert(aligned.rec_compute_vaddr(3) == self.index[3] * 0x80_0000_0000usize) by {
                assert(aligned.rec_compute_vaddr(3) == (aligned.index[3] * page_size(4)
                    + aligned.rec_compute_vaddr(4)) as Vaddr);
            };
            assert(aligned.rec_compute_vaddr(1) == self.index[2] * 0x4000_0000usize + self.index[3]
                * 0x80_0000_0000usize) by {
                assert(aligned.rec_compute_vaddr(1) == (aligned.index[1] * page_size(2)
                    + aligned.rec_compute_vaddr(2)) as Vaddr);
            };
            assert(vaddr(path) == aligned.compute_vaddr());
        } else if level == 1 {
            // Case level == 1: path is [index[3], index[2], index[1]]; radices down to 2^21.
            let aligned = self.align_down(2);
            self.align_down_shape(2);
            self.to_path_index(1, 0);
            self.to_path_index(1, 1);
            self.to_path_index(1, 2);
            path.index_satisfies_elem_inv(0);
            path.index_satisfies_elem_inv(1);
            path.index_satisfies_elem_inv(2);
            assert(vaddr(path) == path.index(0) * 0x80_0000_0000usize + path.index(1)
                * 0x4000_0000usize + path.index(2) * 0x20_0000usize) by {
                assert(vaddr(path) == rec_vaddr(path, 0));
                assert(rec_vaddr(path, 3) == 0);
                assert(rec_vaddr(path, 2) == (vaddr_make::<NR_LEVELS>(2, path.index(2)) + rec_vaddr(path, 3)) as usize);
                assert(rec_vaddr(path, 1) == (vaddr_make::<NR_LEVELS>(1, path.index(1)) + rec_vaddr(path, 2)) as usize);
                assert(rec_vaddr(path, 0) == (vaddr_make::<NR_LEVELS>(0, path.index(0)) + rec_vaddr(path, 1)) as usize);
                assert(vaddr_make::<NR_LEVELS>(0, path.index(0)) == 0x80_0000_0000usize * path.index(0)) by (compute);
                assert(vaddr_make::<NR_LEVELS>(1, path.index(1)) == 0x4000_0000usize * path.index(1)) by (compute);
                assert(vaddr_make::<NR_LEVELS>(2, path.index(2)) == 0x20_0000usize * path.index(2)) by (compute);
            };
            assert(aligned.rec_compute_vaddr(3) == self.index[3] * 0x80_0000_0000usize) by {
                assert(aligned.rec_compute_vaddr(3) == (aligned.index[3] * page_size(4)
                    + aligned.rec_compute_vaddr(4)) as Vaddr);
            };
            assert(aligned.rec_compute_vaddr(1) == self.index[1] * 0x20_0000usize + self.index[2]
                * 0x4000_0000usize + self.index[3] * 0x80_0000_0000usize) by {
                assert(aligned.rec_compute_vaddr(1) == (aligned.index[1] * page_size(2)
                    + aligned.rec_compute_vaddr(2)) as Vaddr);
            };
            assert(aligned.compute_vaddr() == aligned.rec_compute_vaddr(0));
            assert(aligned.rec_compute_vaddr(0) == (aligned.index[0] * page_size(1)
                + aligned.rec_compute_vaddr(1)) as Vaddr);
            assert(vaddr(path) == aligned.compute_vaddr());
        } else {
            // Case level == 0: full path of all four indices; radices down to 2^12.
            let aligned = self.align_down(1);
            self.align_down_shape(1);
            self.to_path_index(0, 0);
            self.to_path_index(0, 1);
            self.to_path_index(0, 2);
            self.to_path_index(0, 3);
            path.index_satisfies_elem_inv(0);
            path.index_satisfies_elem_inv(1);
            path.index_satisfies_elem_inv(2);
            path.index_satisfies_elem_inv(3);
            assert(vaddr(path) == path.index(0) * 0x80_0000_0000usize + path.index(1)
                * 0x4000_0000usize + path.index(2) * 0x20_0000usize + path.index(3) * 0x1000usize)
                by {
                assert(vaddr(path) == rec_vaddr(path, 0));
                assert(rec_vaddr(path, 4) == 0);
                assert(rec_vaddr(path, 2) == (vaddr_make::<NR_LEVELS>(2, path.index(2)) + rec_vaddr(
                    path,
                    3,
                )) as usize);
                assert(rec_vaddr(path, 1) == (vaddr_make::<NR_LEVELS>(1, path.index(1)) + rec_vaddr(
                    path,
                    2,
                )) as usize);
                assert(vaddr_make::<NR_LEVELS>(0, path.index(0)) == 0x80_0000_0000usize
                    * path.index(0)) by (compute);
                assert(vaddr_make::<NR_LEVELS>(1, path.index(1)) == 0x4000_0000usize * path.index(
                    1,
                )) by (compute);
                assert(vaddr_make::<NR_LEVELS>(2, path.index(2)) == 0x20_0000usize * path.index(2))
                    by (compute);
                assert(vaddr_make::<NR_LEVELS>(3, path.index(3)) == 0x1000usize * path.index(3))
                    by (compute);
            };
            assert(aligned.rec_compute_vaddr(4) == 0);
            assert(aligned.rec_compute_vaddr(3) == self.index[3] * 0x80_0000_0000usize) by {
                assert(aligned.rec_compute_vaddr(3) == (aligned.index[3] * page_size(4)
                    + aligned.rec_compute_vaddr(4)) as Vaddr);
            };
            assert(aligned.rec_compute_vaddr(2) == self.index[2] * 0x4000_0000usize + self.index[3]
                * 0x80_0000_0000usize) by {
            };
            assert(aligned.compute_vaddr() == self.index[0] * 0x1000usize + self.index[1]
                * 0x20_0000usize + self.index[2] * 0x4000_0000usize + self.index[3]
                * 0x80_0000_0000usize) by {
                assert(aligned.compute_vaddr() == aligned.rec_compute_vaddr(0));
                assert(aligned.rec_compute_vaddr(0) == (aligned.index[0] * page_size(1)
                    + aligned.rec_compute_vaddr(1)) as Vaddr);
            };
            assert(vaddr(path) == aligned.compute_vaddr());
        }
    }
698
    /// The concrete to_vaddr() equals the computed vaddr.
    /// Both are the same mixed-radix sum, just accumulated in opposite orders
    /// (`to_vaddr` via pow2 terms, `compute_vaddr` via page_size terms).
    /// NOTE(review): axiom — could be proved by induction relating pow2(12 + 9*i)
    /// to page_size(i + 1).
    pub axiom fn to_vaddr_is_compute_vaddr(self)
        requires
            self.inv(),
        ensures
            self.to_vaddr() == self.compute_vaddr(),
    ;
706
    /// Bounds on the partial sum: `to_vaddr_indices(start)` is non-negative, and even
    /// after adding one more unit of the radix at `start` it stays within the total
    /// translated-address width 2^(12 + 9 * NR_LEVELS).
    pub proof fn to_vaddr_indices_gap_bound(self, start: int)
        requires
            self.inv(),
            0 <= start <= NR_LEVELS,
        ensures
            0 <= self.to_vaddr_indices(start),
            self.to_vaddr_indices(start) + pow2((12 + 9 * start) as nat) as int <= pow2(
                (12 + 9 * NR_LEVELS) as nat,
            ) as int,
        decreases NR_LEVELS - start,
    {
        vstd::arithmetic::power2::lemma2_to64();
        vstd::arithmetic::power2::lemma2_to64_rest();
        vstd::arithmetic::power2::lemma_pow2_pos((12 + 9 * start) as nat);
        if start == NR_LEVELS {
            // Base case: the sum is 0 and the radix itself is within the total width.
        } else {
            let shift = pow2((12 + 9 * start) as nat) as int;
            let next_shift = pow2((12 + 9 * (start + 1)) as nat) as int;
            let top = pow2((12 + 9 * NR_LEVELS) as nat) as int;
            self.to_vaddr_indices_gap_bound(start + 1);
            assert(self.index.contains_key(start));
            // 2^(12+9*start) * 2^9 == 2^(12+9*(start+1)): relates shift to next_shift.
            vstd::arithmetic::power2::lemma_pow2_adds((12 + 9 * start) as nat, 9nat);
            // (index[start] + 1) * shift <= 0x200 * shift == next_shift, since
            // index[start] < NR_ENTRIES == 0x200 by inv().
            vstd::arithmetic::mul::lemma_mul_inequality(self.index[start] + 1, 0x200int, shift);
            vstd::arithmetic::mul::lemma_mul_is_distributive_add_other_way(
                shift,
                self.index[start],
                1,
            );
        }
    }
737
    /// `offset + to_vaddr_indices(0)` fits below `usize::MAX`, and `to_vaddr()`
    /// equals that sum exactly (the cast does not truncate).
    pub proof fn to_vaddr_bounded(self)
        requires
            self.inv(),
        ensures
            0 <= self.offset + self.to_vaddr_indices(0) < usize::MAX,
            self.to_vaddr() as int == self.offset + self.to_vaddr_indices(0),
    {
        vstd::arithmetic::power2::lemma2_to64();
        vstd::arithmetic::power2::lemma2_to64_rest();
        // Bound the full index sum starting at level 0.
        self.to_vaddr_indices_gap_bound(0);
        // NOTE(review): the two bindings below are never referenced afterwards;
        // they appear to exist only to mention the pow2 terms for the solver.
        // Left in place — removing them might break the proof. TODO confirm.
        let shift0 = pow2(12nat) as int;
        let top = pow2((12 + 9 * NR_LEVELS) as nat) as int;
    }
751
    /// Incrementing the index at position `level - 1` by one (when it stays below
    /// NR_ENTRIES) increases `to_vaddr()` by exactly `page_size(level)`.
    pub proof fn index_increment_adds_page_size(self, level: int)
        requires
            self.inv(),
            1 <= level <= NR_LEVELS,
            self.index[level - 1] + 1 < NR_ENTRIES,
        ensures
            (Self {
                index: self.index.insert(level - 1, self.index[level - 1] + 1),
                ..self
            }).to_vaddr() == self.to_vaddr() + page_size(level as PagingLevel),
    {
        // The incremented abstract vaddr under consideration.
        let new_va = Self {
            index: self.index.insert(level - 1, self.index[level - 1] + 1),
            ..self
        };
        // Establish new_va.inv()
        assert forall|i: int| #![trigger new_va.index.contains_key(i)] 0 <= i < NR_LEVELS implies {
            &&& new_va.index.contains_key(i)
            &&& 0 <= new_va.index[i]
            &&& new_va.index[i] < NR_ENTRIES
        } by {
            // Use self.inv() to establish bounds on self.index[i]
            assert(self.index.contains_key(i));
        };
        // Both addresses are in range and decompose as offset + index sum.
        self.to_vaddr_bounded();
        new_va.to_vaddr_bounded();
        vstd::arithmetic::power2::lemma2_to64();
        vstd::arithmetic::power2::lemma2_to64_rest();
        // Case split on the affected level; in each case the indices at or above
        // `level` are unchanged, so the tails of to_vaddr_indices agree, and the
        // incremented index contributes exactly one page_size(level) step.
        if level == 1 {
            lemma_page_size_spec_level1();
            assert forall|i: int| 1 <= i < NR_LEVELS implies new_va.index[i] == self.index[i] by {};
            new_va.to_vaddr_indices_eq_if_indices_eq(self, 1);
        } else if level == 2 {
            vstd_extra::external::ilog2::lemma_usize_ilog2_to32();
            assert forall|i: int| 2 <= i < NR_LEVELS implies new_va.index[i] == self.index[i] by {};
            new_va.to_vaddr_indices_eq_if_indices_eq(self, 2);
            assert(self.to_vaddr_indices(0) == self.index[0] * pow2(12nat) as int
                + self.to_vaddr_indices(1));
            // 0x20_0000 = 2 MiB, the level-2 page size.
            assert(new_va.to_vaddr_indices(1) == self.to_vaddr_indices(1) + 0x20_0000);
        } else if level == 3 {
            vstd_extra::external::ilog2::lemma_usize_ilog2_to32();
            assert forall|i: int| 3 <= i < NR_LEVELS implies new_va.index[i] == self.index[i] by {};
            new_va.to_vaddr_indices_eq_if_indices_eq(self, 3);
            // 0x4000_0000 = 1 GiB, the level-3 page size; propagate down the sums.
            assert(new_va.to_vaddr_indices(2) == self.to_vaddr_indices(2) + 0x4000_0000);
            assert(new_va.to_vaddr_indices(1) == self.to_vaddr_indices(1) + 0x4000_0000);
        } else {
            vstd_extra::external::ilog2::lemma_usize_ilog2_to32();
            let ps4 = crate::mm::page_table::page_size_spec(4);
            assert forall|i: int| 4 <= i < NR_LEVELS implies new_va.index[i] == self.index[i] by {};
            new_va.to_vaddr_indices_eq_if_indices_eq(self, 4);
            assert(self.to_vaddr_indices(1) == self.index[1] * pow2(21nat) as int
                + self.to_vaddr_indices(2));
            assert(self.to_vaddr_indices(2) == self.index[2] * pow2(30nat) as int
                + self.to_vaddr_indices(3));
            // 0x80_0000_0000 = 512 GiB, the level-4 step; nonlinear distributivity.
            assert((self.index[3] + 1) * 0x80_0000_0000 == self.index[3] * 0x80_0000_0000
                + 0x80_0000_0000) by (nonlinear_arith);
            assert(new_va.to_vaddr_indices(3) == self.to_vaddr_indices(3) + 0x80_0000_0000);
            assert(new_va.to_vaddr_indices(2) == self.to_vaddr_indices(2) + 0x80_0000_0000);
            assert(new_va.to_vaddr_indices(1) == self.to_vaddr_indices(1) + 0x80_0000_0000);
            assert(new_va.to_vaddr() as int == self.offset + self.to_vaddr_indices(0)
                + 0x80_0000_0000);
        }
    }
815
    /// Path extracted from abstract vaddr has correct length.
    ///
    /// `to_path(level)` covers levels NR_LEVELS-1 down to `level`, hence its
    /// length is NR_LEVELS - level. Delegates to the recursive helper.
    pub proof fn to_path_len(self, level: int)
        requires
            0 <= level < NR_LEVELS,
        ensures
            self.to_path(level).len() == NR_LEVELS - level,
    {
        self.rec_to_path_len(NR_LEVELS - 1, level);
    }
825
    /// Length of the recursively built path: one entry per level in
    /// `[bottom_level, abstract_level]`, i.e. `abstract_level - bottom_level + 1`.
    proof fn rec_to_path_len(self, abstract_level: int, bottom_level: int)
        requires
            bottom_level <= abstract_level,
        ensures
            self.rec_to_path(abstract_level, bottom_level).len() == abstract_level - bottom_level
                + 1,
        decreases abstract_level - bottom_level,
    {
        // The recursive structure:
        // - rec_to_path(a, b) = rec_to_path(a-1, b).push(index[a]) when a >= b
        // - rec_to_path(a, b) = seq![] when a < b
        // So rec_to_path(a, b).len() = rec_to_path(a-1, b).len() + 1 = ... = a - b + 1
        if abstract_level > bottom_level {
            // Inductive step on the shorter suffix.
            self.rec_to_path_len(abstract_level - 1, bottom_level);
        }
        // Structural reasoning about recursive definition

    }
844
    /// Path extracted from abstract vaddr has valid indices.
    ///
    /// Each path element is some `index[j]`, which `inv()` bounds by NR_ENTRIES,
    /// so the whole path satisfies `TreePath::inv()`.
    pub proof fn to_path_inv(self, level: int)
        requires
            self.inv(),
            0 <= level < NR_LEVELS,
        ensures
            self.to_path(level).inv(),
    {
        self.to_path_len(level);
        assert forall|i: int| 0 <= i < self.to_path(level).len() implies TreePath::<
            NR_ENTRIES,
        >::elem_inv(#[trigger] self.to_path(level).index(i)) by {
            // Path position i maps to abstract level NR_LEVELS - 1 - i.
            let j = NR_LEVELS - 1 - i;
            self.to_path_index(level, i);
            // inv() gives 0 <= index[j] < NR_ENTRIES for this key.
            assert(self.index.contains_key(j));
        };
    }
862}
863
864/// Connection between TreePath's vaddr and AbstractVaddr
865impl AbstractVaddr {
    /// `rec_vaddr(path, idx)` depends only on the path entries at positions
    /// `>= idx`: two equal-length paths agreeing from `idx` on yield equal values.
    proof fn rec_vaddr_eq_if_indices_eq(
        path1: TreePath<NR_ENTRIES>,
        path2: TreePath<NR_ENTRIES>,
        idx: int,
    )
        requires
            path1.inv(),
            path2.inv(),
            path1.len() == path2.len(),
            0 <= idx <= path1.len(),
            forall|i: int| idx <= i < path1.len() ==> path1.index(i) == path2.index(i),
        ensures
            rec_vaddr(path1, idx) == rec_vaddr(path2, idx),
        decreases path1.len() - idx,
    {
        if idx < path1.len() {
            // Both heads are valid elements; tails agree by induction.
            path1.index_satisfies_elem_inv(idx);
            path2.index_satisfies_elem_inv(idx);
            Self::rec_vaddr_eq_if_indices_eq(path1, path2, idx + 1);
        }
    }
887
    /// If a TreePath matches this abstract vaddr's indices at all levels covered by the path,
    /// then vaddr(path) equals the aligned compute_vaddr at the corresponding level.
    ///
    /// The empty-path case is handled separately: align_down(5) zeroes every
    /// index, so both sides are 0; otherwise the path coincides with
    /// `to_path(level)` and the equality follows from `to_path_vaddr`.
    pub proof fn path_matches_vaddr(self, path: TreePath<NR_ENTRIES>)
        requires
            self.inv(),
            path.inv(),
            path.len() <= NR_LEVELS,
            forall|i: int| 0 <= i < path.len() ==> path.index(i) == self.index[NR_LEVELS - 1 - i],
        ensures
            vaddr(path) == self.align_down((NR_LEVELS - path.len() + 1) as int).compute_vaddr()
                - self.align_down((NR_LEVELS - path.len() + 1) as int).offset,
    {
        if path.len() == 0 {
            let aligned = self.align_down(5);
            self.align_down_shape(4);
            // align_down(5) zeroes index[3] on top of align_down(4), so all indices + offset are 0.
            assert(aligned.index[3] == 0) by {
                assert(aligned == AbstractVaddr {
                    index: self.align_down(4).index.insert(3, 0),
                    ..self.align_down(4)
                });
            };
            // Unfold rec_compute_vaddr level by level; every term is zero.
            assert(aligned.rec_compute_vaddr(4) == 0);
            assert(aligned.rec_compute_vaddr(3) == 0) by {
                assert(aligned.rec_compute_vaddr(3) == (aligned.index[3] * page_size(4)
                    + aligned.rec_compute_vaddr(4)) as Vaddr);
            };
            assert(aligned.rec_compute_vaddr(2) == 0) by {
                assert(aligned.rec_compute_vaddr(2) == (aligned.index[2] * page_size(3)
                    + aligned.rec_compute_vaddr(3)) as Vaddr);
            };
            assert(aligned.rec_compute_vaddr(1) == 0) by {
                assert(aligned.rec_compute_vaddr(1) == (aligned.index[1] * page_size(2)
                    + aligned.rec_compute_vaddr(2)) as Vaddr);
            };
        } else {
            // Non-empty path: it agrees elementwise with to_path(level), so
            // their vaddrs coincide, and to_path_vaddr gives the aligned form.
            let level = (NR_LEVELS - path.len()) as int;
            assert(0 <= level < NR_LEVELS);
            self.to_path_inv(level);
            self.to_path_len(level);
            assert forall|i: int| 0 <= i < path.len() implies #[trigger]path.index(i) == self.to_path(
                level,
            ).index(i) by {
                self.to_path_index(level, i);
            };
            Self::rec_vaddr_eq_if_indices_eq(path, self.to_path(level), 0);
            self.to_path_vaddr(level);
            self.align_down_shape(level + 1);
        }
    }
938
    /// The path index at position i corresponds to the abstract vaddr index at level (NR_LEVELS - 1 - i).
    /// This is the key mapping between TreePath ordering and AbstractVaddr index ordering.
    pub proof fn to_path_index(self, level: int, i: int)
        requires
            self.inv(),
            0 <= level < NR_LEVELS,
            0 <= i < NR_LEVELS - level,
        ensures
            self.to_path(level).index(i) == self.index[NR_LEVELS - 1 - i],
    {
        self.to_path_len(level);
        // Delegate to the recursive helper starting at the top level.
        self.rec_to_path_index(NR_LEVELS - 1, level, i);
    }
952
    /// Recursive helper for `to_path_index`: element `i` of
    /// `rec_to_path(abstract_level, bottom_level)` equals `index[abstract_level - i]`.
    proof fn rec_to_path_index(self, abstract_level: int, bottom_level: int, i: int)
        requires
            self.inv(),
            0 <= bottom_level <= abstract_level < NR_LEVELS,
            0 <= i < abstract_level - bottom_level + 1,
        ensures
            self.rec_to_path(abstract_level, bottom_level).index(i) == self.index[abstract_level
                - i],
        decreases abstract_level - bottom_level,
    {
        assert(self.index.contains_key(abstract_level));
        if abstract_level == bottom_level {
            // Single-element path: position 0 is index[abstract_level].
        } else {
            // The path is head (current level) prepended to the recursive tail.
            let head = seq![self.index[abstract_level] as usize];
            let tail = self.rec_to_path(abstract_level - 1, bottom_level);
            let full = head.add(tail);
            if i == 0 {
                // Position 0 is the head element itself.
            } else {
                // Positions >= 1 land in the tail, shifted by one.
                self.rec_to_path_index(abstract_level - 1, bottom_level, i - 1);
                assert(0 <= i - 1 < tail.len()) by {
                    self.rec_to_path_len(abstract_level - 1, bottom_level);
                };
            }
        }
    }
978
    /// Direct connection: vaddr(to_path(level)) equals the aligned concrete vaddr.
    /// This combines to_path_vaddr with to_vaddr_is_compute_vaddr.
    pub proof fn to_path_vaddr_concrete(self, level: int)
        requires
            self.inv(),
            0 <= level < NR_LEVELS,
        ensures
            vaddr(self.to_path(level)) == nat_align_down(
                self.to_vaddr() as nat,
                page_size((level + 1) as PagingLevel) as nat,
            ) as usize,
    {
        self.to_path_vaddr(level);
        let aligned = self.align_down(level + 1);
        self.align_down_shape(level + 1);
        // Bridge the aligned abstract vaddr's compute_vaddr to its concrete value.
        aligned.to_vaddr_is_compute_vaddr();
        self.align_down_concrete(level + 1);
        aligned.reflect_prop(
            nat_align_down(
                self.to_vaddr() as nat,
                page_size((level + 1) as PagingLevel) as nat,
            ) as Vaddr,
        );
    }
1003
    /// Key property: if we have a path that matches cur_va's indices, then
    /// vaddr(path) + page_size(level) bounds the range containing cur_va.
    pub proof fn vaddr_range_from_path(self, level: int)
        requires
            self.inv(),
            0 <= level < NR_LEVELS,
        ensures
            vaddr(self.to_path(level)) <= self.to_vaddr() < vaddr(self.to_path(level)) + page_size(
                (level + 1) as PagingLevel,
            ),
    {
        // The path's vaddr is to_vaddr() aligned down to page_size(level + 1).
        self.to_path_vaddr_concrete(level);
        let size = page_size((level + 1) as PagingLevel);
        let cur = self.to_vaddr() as nat;
        let start = vaddr(self.to_path(level));

        assert(page_size((level + 1) as PagingLevel) >= PAGE_SIZE) by {
            lemma_page_size_ge_page_size((level + 1) as PagingLevel);
        };
        // align_down(cur, size) <= cur < align_down(cur, size) + size.
        lemma_nat_align_down_sound(cur, size as nat);
    }
1025}
1026
1027} // verus!