// Path: ostd/specs/mm/page_table/mod.rs

#![allow(hidden_glob_reexports)]
//! Abstract specification of page-table virtual addresses.
//!
//! Defines [`AbstractVaddr`], a per-level decomposition of a virtual address,
//! and proofs/axioms connecting it to concrete `Vaddr` arithmetic and to
//! `TreePath` indices used by the ghost page-table tree.

pub mod cursor;
pub mod node;
mod owners;
mod view;

pub use cursor::*;
pub use node::*;
pub use owners::*;
pub use view::*;

use vstd::arithmetic::power2::pow2;
use vstd::prelude::*;
use vstd_extra::arithmetic::*;
use vstd_extra::ghost_tree::TreePath;
use vstd_extra::ownership::*;

use crate::mm::page_table::page_size;
use crate::mm::page_table::PageTableConfig;
use crate::mm::{PagingLevel, Vaddr};
use crate::specs::arch::mm::{NR_ENTRIES, NR_LEVELS, PAGE_SIZE};

use align_ext::AlignExt;
26verus! {
27
/// An abstract representation of a virtual address as a sequence of indices, representing the
/// values of the bit-fields that index into each level of the page table.
///
/// - `offset` is the lowest 12 bits (the offset into a 4096-byte page).
/// - `index[0]` is the next 9 bits (the index into the 512 entries of the
///   lowest-level page table), and so on up to `index[NR_LEVELS - 1]` (the root level).
pub struct AbstractVaddr {
    // Byte offset within the page; well-formed values lie in [0, PAGE_SIZE)
    // (see the `Inv` implementation below).
    pub offset: int,
    // Map from 0-based paging level to the index at that level; well-formed
    // values have keys 0..NR_LEVELS with values in [0, NR_ENTRIES).
    pub index: Map<int, int>,
}
37
impl Inv for AbstractVaddr {
    /// Well-formedness invariant: the offset fits in one page, and every level
    /// `0..NR_LEVELS` has an index entry bounded by `NR_ENTRIES`.
    open spec fn inv(self) -> bool {
        &&& 0 <= self.offset < PAGE_SIZE
        // Triggering on `contains_key` lets the solver instantiate this
        // quantifier whenever a key membership fact about `index` is mentioned.
        &&& forall |i: int|
            #![trigger self.index.contains_key(i)]
        0 <= i < NR_LEVELS ==> {
            &&& self.index.contains_key(i)
            &&& 0 <= self.index[i]
            &&& self.index[i] < NR_ENTRIES
        }
    }
}
50
impl AbstractVaddr {
    /// Extract the AbstractVaddr components from a concrete virtual address.
    /// - offset = lowest 12 bits
    /// - index[i] = bits (12 + 9*i) to (12 + 9*(i+1) - 1) for each level
    pub open spec fn from_vaddr(va: Vaddr) -> Self {
        AbstractVaddr {
            offset: (va % PAGE_SIZE) as int,
            index: Map::new(
                |i: int| 0 <= i < NR_LEVELS,
                // Shift right by (12 + 9*i) bits, then mask down to a 9-bit field.
                |i: int| ((va / pow2((12 + 9 * i) as nat) as usize) % NR_ENTRIES) as int,
            ),
        }
    }

    /// `from_vaddr` always yields a well-formed `AbstractVaddr` (satisfies `inv`).
    pub proof fn from_vaddr_wf(va: Vaddr)
        ensures
            AbstractVaddr::from_vaddr(va).inv(),
    {
        let abs = AbstractVaddr::from_vaddr(va);
        assert(0 <= abs.offset < PAGE_SIZE);
        assert forall |i: int|
            #![trigger abs.index.contains_key(i)]
            0 <= i < NR_LEVELS implies {
                &&& abs.index.contains_key(i)
                &&& 0 <= abs.index[i]
                &&& abs.index[i] < NR_ENTRIES
            } by {
            // index[i] = (va / 2^(12+9*i)) % NR_ENTRIES, which is in [0, NR_ENTRIES)
        };
    }

    /// Reconstruct the concrete virtual address from the AbstractVaddr components.
    /// va = offset + sum(index[i] * 2^(12 + 9*i)) for i in 0..NR_LEVELS
    pub open spec fn to_vaddr(self) -> Vaddr {
        (self.offset + self.to_vaddr_indices(0)) as Vaddr
    }

    /// Helper: sum of index[i] * 2^(12 + 9*i) for i in start..NR_LEVELS
    pub open spec fn to_vaddr_indices(self, start: int) -> int
        decreases NR_LEVELS - start when start <= NR_LEVELS
    {
        if start >= NR_LEVELS {
            0
        } else {
            // Note: `as` binds tighter than `*`, so this is
            // index[start] * (pow2(12 + 9*start) as int) + (rest of the sum).
            self.index[start] * pow2((12 + 9 * start) as nat) as int
                + self.to_vaddr_indices(start + 1)
        }
    }

    /// reflect(self, va) holds when self is the abstract representation of va.
    pub open spec fn reflect(self, va: Vaddr) -> bool {
        self == Self::from_vaddr(va)
    }

    /// If self reflects va, then self.to_vaddr() == va and self == from_vaddr(va).
    /// The first ensures requires proving the round-trip property: from_vaddr(va).to_vaddr() == va.
    pub broadcast proof fn reflect_prop(self, va: Vaddr)
        requires
            self.inv(),
            self.reflect(va),
        ensures
            #[trigger] self.to_vaddr() == va,
            #[trigger] Self::from_vaddr(va) == self,
    {
        // self.reflect(va) means self == from_vaddr(va)
        // So self.to_vaddr() == from_vaddr(va).to_vaddr()
        // We need: from_vaddr(va).to_vaddr() == va (round-trip property)
        Self::from_vaddr_to_vaddr_roundtrip(va);
    }

    /// Round-trip property: extracting and reconstructing a VA gives back the original.
    pub proof fn from_vaddr_to_vaddr_roundtrip(va: Vaddr)
        ensures
            Self::from_vaddr(va).to_vaddr() == va,
    {
        // va = offset + sum(index[i] * 2^(12+9*i))
        // from_vaddr extracts: offset = va % 2^12, index[i] = (va / 2^(12+9*i)) % 2^9
        // to_vaddr reconstructs: offset + sum(index[i] * 2^(12+9*i))
        // This equals va by the uniqueness of positional representation in mixed radix.
        // NOTE(review): outstanding proof debt — discharged with admit().
        admit()
    }

    /// from_vaddr(va) reflects va (by definition of reflect).
    pub broadcast proof fn reflect_from_vaddr(va: Vaddr)
        ensures
            #[trigger] Self::from_vaddr(va).reflect(va),
            #[trigger] Self::from_vaddr(va).inv(),
    {
        Self::from_vaddr_wf(va);
    }

    /// If self.inv(), then self reflects self.to_vaddr().
    pub broadcast proof fn reflect_to_vaddr(self)
        requires
            self.inv(),
        ensures
            #[trigger] self.reflect(self.to_vaddr()),
    {
        Self::to_vaddr_from_vaddr_roundtrip(self);
    }

    /// Inverse round-trip: to_vaddr then from_vaddr gives back the original AbstractVaddr.
    pub proof fn to_vaddr_from_vaddr_roundtrip(abs: Self)
        requires
            abs.inv(),
        ensures
            Self::from_vaddr(abs.to_vaddr()) == abs,
    {
        // Similar reasoning: the positional representation is unique
        // NOTE(review): outstanding proof debt — discharged with admit().
        admit()
    }

    /// If two AbstractVaddrs reflect the same va, they are equal.
    /// (Immediate from `reflect`: both equal `from_vaddr(va)`.)
    pub broadcast proof fn reflect_eq(self, other: Self, va: Vaddr)
        requires
            self.reflect(va),
            other.reflect(va),
        ensures
            #![auto] (self == other),
    { }

    /// Align down to `level`: zeroes the page offset and the indices of all
    /// levels strictly below `level` (keys 0 ..= level - 2), leaving keys
    /// `level - 1` and above unchanged.
    pub open spec fn align_down(self, level: int) -> Self
        decreases level when level >= 1
    {
        if level == 1 {
            AbstractVaddr {
                offset: 0,
                index: self.index,
            }
        } else {
            let tmp = self.align_down(level - 1);
            AbstractVaddr {
                index: tmp.index.insert(level - 2, 0),
                ..tmp
            }
        }
    }

    /// `align_down` preserves the invariant and the indices at keys `level - 1` and above.
    ///
    /// NOTE(review): the second ensures quantifies `level <= i < NR_LEVELS` but reads
    /// `index[i - 1]`, so it covers keys `level - 1 ..= NR_LEVELS - 2` only; key
    /// `NR_LEVELS - 1` is also preserved by `align_down` but is not stated here.
    /// Likewise the precondition is `level < NR_LEVELS` while `align_down` itself is
    /// used with `level <= NR_LEVELS` elsewhere — confirm both are intentional.
    pub proof fn align_down_inv(self, level: int)
        requires
            1 <= level < NR_LEVELS,
            self.inv(),
        ensures
            self.align_down(level).inv(),
            forall |i: int| level <= i < NR_LEVELS ==> #[trigger] self.index[i - 1] == self.align_down(level).index[i - 1],
        decreases level
    {
        if level == 1 {
            assert(self.inv());
        } else {
            let tmp = self.align_down(level - 1);
            self.align_down_inv(level - 1);
        }
    }

    /// If two AbstractVaddrs share the same indices at levels >= level-1 (i.e., index[level-1] and above),
    /// then aligning them down to `level` gives the same to_vaddr() result.
    /// This is because align_down(level) zeroes offset and indices 0 through level-2,
    /// so only indices level-1 and above affect the to_vaddr() result.
    pub proof fn align_down_to_vaddr_eq_if_upper_indices_eq(self, other: Self, level: int)
        requires
            1 <= level <= NR_LEVELS,
            self.inv(),
            other.inv(),
            // Indices at level-1 and above are equal
            forall |i: int| level - 1 <= i < NR_LEVELS ==> self.index[i] == other.index[i],
        ensures
            self.align_down(level).to_vaddr() == other.align_down(level).to_vaddr(),
        decreases level
    {
        // align_down(level).to_vaddr() = sum of index[i] * 2^(12 + 9*i) for i >= level-1
        // Since indices at levels >= level-1 are equal, the sums are equal
        //
        // align_down(level) zeroes offset and indices 0 through level-2
        // So to_vaddr() = 0 + sum(0 * ...) for i < level-1 + sum(index[i] * ...) for i >= level-1
        //               = sum(index[i] * 2^(12+9*i)) for i >= level-1
        //
        // Since self.index[i] == other.index[i] for i >= level-1, the sums are equal.

        // Use the axiom that relates align_down to concrete alignment
        self.align_down_concrete(level);
        other.align_down_concrete(level);

        // align_down_concrete says:
        //   self.align_down(level).reflect(nat_align_down(self.to_vaddr(), page_size(level)))
        //   other.align_down(level).reflect(nat_align_down(other.to_vaddr(), page_size(level)))
        //
        // reflect means the AbstractVaddr == from_vaddr(concrete_va)
        // So: self.align_down(level) == from_vaddr(nat_align_down(self.to_vaddr(), page_size(level)))
        //     other.align_down(level) == from_vaddr(nat_align_down(other.to_vaddr(), page_size(level)))
        //
        // Therefore:
        //   self.align_down(level).to_vaddr() == nat_align_down(self.to_vaddr(), page_size(level))
        //   other.align_down(level).to_vaddr() == nat_align_down(other.to_vaddr(), page_size(level))
        //
        // We need: nat_align_down(self.to_vaddr(), page_size(level)) == nat_align_down(other.to_vaddr(), page_size(level))
        //
        // page_size(level) = 2^(12 + 9*(level-1)) = 2^(3 + 9*level)
        // nat_align_down(va, size) = va - (va % size) = (va / size) * size
        //
        // va / page_size(level) = va / 2^(12 + 9*(level-1))
        //                       = sum over i >= level-1: index[i] * 2^(9*(i - level + 1))
        //
        // Since indices at i >= level-1 are equal, va / page_size(level) are equal, so align_down results are equal.

        // For now, admit the arithmetic
        // NOTE(review): outstanding proof debt — discharged with admit().
        admit()
    }

    /// Trusted axiom: abstract `align_down` agrees with concrete `nat_align_down`
    /// of the reconstructed address by `page_size(level)`.
    pub axiom fn align_down_concrete(self, level: int)
        requires
            1 <= level <= NR_LEVELS,
        ensures
            self.align_down(level).reflect(nat_align_down(self.to_vaddr() as nat, page_size(level as PagingLevel) as nat) as Vaddr);

    /// Align up to `level`: align down one level below, then advance to the
    /// next index at `level`.
    ///
    /// NOTE(review): for `level == 1` this evaluates `align_down(0)`, which is
    /// outside `align_down`'s `when level >= 1` guard (the spec is unconstrained
    /// there) — confirm callers only use `level >= 2`, or rely solely on
    /// `align_up_concrete` for its meaning.
    pub open spec fn align_up(self, level: int) -> Self {
        let lower_aligned = self.align_down(level - 1);
        lower_aligned.next_index(level)
    }

    /// Trusted axiom: abstract `align_up` agrees with concrete `nat_align_up`
    /// of the reconstructed address by `page_size(level)`.
    pub axiom fn align_up_concrete(self, level: int)
        requires
            1 <= level <= NR_LEVELS,
        ensures
            self.align_up(level).reflect(nat_align_up(self.to_vaddr() as nat, page_size(level as PagingLevel) as nat) as Vaddr);

    /// Trusted axiom: align-up exceeds align-down by exactly one page of this
    /// level, and the aligned-up address does not overflow `usize`.
    ///
    /// NOTE(review): as stated this only holds when `to_vaddr()` is not already
    /// aligned to `page_size(level)` (otherwise align_up == align_down) — confirm
    /// the intended usage sites guarantee that.
    pub axiom fn align_diff(self, level: int)
        requires
            1 <= level <= NR_LEVELS,
        ensures
            nat_align_up(self.to_vaddr() as nat, page_size(level as PagingLevel) as nat) ==
            nat_align_down(self.to_vaddr() as nat, page_size(level as PagingLevel) as nat) + page_size(level as PagingLevel),
            nat_align_up(self.to_vaddr() as nat, page_size(level as PagingLevel) as nat) < usize::MAX;

    /// Increment the index at `level` (1-based), carrying into higher levels:
    /// - on overflow (index + 1 == NR_ENTRIES) below the root, the field wraps
    ///   to 0 and the increment propagates to `level + 1`;
    /// - on overflow at the root (level == NR_LEVELS), the root field wraps to 0
    ///   with no further carry;
    /// - otherwise the field is simply incremented.
    /// The page offset is preserved in all cases.
    pub open spec fn next_index(self, level: int) -> Self
        decreases NR_LEVELS - level when 1 <= level <= NR_LEVELS
    {
        let index = self.index[level - 1];
        let next_index = index + 1;
        if next_index == NR_ENTRIES && level < NR_LEVELS {
            let next_va = Self {
                offset: self.offset,
                index: self.index.insert(level - 1, 0),
            };
            next_va.next_index(level + 1)
        } else if next_index == NR_ENTRIES && level == NR_LEVELS {
            Self {
                offset: self.offset,
                index: self.index.insert(level - 1, 0),
            }
        } else {
            Self {
                offset: self.offset,
                index: self.index.insert(level - 1, next_index),
            }
        }
    }

    /// Characterizes the carry behavior of `next_index(start_level)` from `level`
    /// upward: a zero result field at `level - 1` means the original field was
    /// `NR_ENTRIES - 1` (and the carry continued into higher levels); a nonzero
    /// result field means no carry out of this level occurred.
    pub open spec fn wrapped(self, start_level: int, level: int) -> bool
        decreases NR_LEVELS - level when 1 <= start_level <= level <= NR_LEVELS
    {
        &&& self.next_index(start_level).index[level - 1] == 0 ==> {
            &&& self.index[level - 1] + 1 == NR_ENTRIES
            &&& if level < NR_LEVELS {
                self.wrapped(start_level, level + 1)
            } else {
                true
            }
        }
        &&& self.next_index(start_level).index[level - 1] != 0 ==>
            self.index[level - 1] + 1 < NR_ENTRIES
    }

    /// Unfold `wrapped`: a zero result field at `level - 1` implies the original
    /// field was saturated at `NR_ENTRIES - 1`.
    pub proof fn use_wrapped(self, start_level: int, level: int)
        requires
            1 <= start_level <= level < NR_LEVELS,
            self.wrapped(start_level, level),
            self.next_index(start_level).index[level - 1] == 0
        ensures
            self.index[level - 1] + 1 == NR_ENTRIES
    { }

    /// Unfold `wrapped` one step: if the carry wrapped through `level`, the
    /// `wrapped` property holds at `level + 1` as well.
    pub proof fn wrapped_unwrap(self, start_level: int, level: int)
        requires
            1 <= start_level <= level < NR_LEVELS,
            self.wrapped(start_level, level),
            self.next_index(start_level).index[level - 1] == 0,
        ensures
            self.wrapped(start_level, level + 1)
    { }

    /// `next_index(level)` always satisfies the `wrapped(level, level)`
    /// carry characterization for well-formed addresses.
    pub proof fn next_index_wrap_condition(self, level: int)
        requires
            self.inv(),
            1 <= level <= NR_LEVELS,
        ensures
            self.wrapped(level, level)
    // NOTE(review): outstanding proof debt — discharged with admit().
    { admit() }

    //
    // === Connection to TreePath and concrete Vaddr ===
    //

    /// Computes the concrete vaddr from the abstract representation.
    /// This matches the structure:
    ///   index[NR_LEVELS-1] * 2^39 + index[NR_LEVELS-2] * 2^30 + ... + index[0] * 2^12 + offset
    pub open spec fn compute_vaddr(self) -> Vaddr {
        self.rec_compute_vaddr(0)
    }

    /// Helper for computing vaddr recursively from level i upward.
    pub open spec fn rec_compute_vaddr(self, i: int) -> Vaddr
        decreases NR_LEVELS - i when 0 <= i <= NR_LEVELS
    {
        if i >= NR_LEVELS {
            self.offset as Vaddr
        } else {
            // page_size(i + 1) serves as the positional weight 2^(12 + 9*i)
            // for the index at level i (see the doc on compute_vaddr).
            let shift = page_size((i + 1) as PagingLevel);
            (self.index[i] * shift + self.rec_compute_vaddr(i + 1)) as Vaddr
        }
    }

    /// Extracts a TreePath from this abstract vaddr, from the root down to the given level.
    /// The path has length (NR_LEVELS - level), containing indices for paging levels NR_LEVELS..level+1.
    /// - level=0: full path of length NR_LEVELS with indices for all levels
    /// - level=3: path of length 1 with just the root index
    ///
    /// Path index mapping:
    /// - path.index(0) = self.index[NR_LEVELS - 1]  (root level)
    /// - path.index(i) = self.index[NR_LEVELS - 1 - i]
    /// - path.index(NR_LEVELS - level - 1) = self.index[level]  (last entry)
    pub open spec fn to_path(self, level: int) -> TreePath<NR_ENTRIES>
        recommends 0 <= level < NR_LEVELS
    {
        TreePath(self.rec_to_path(NR_LEVELS - 1, level))
    }

    /// Builds the path sequence from abstract_level down to bottom_level (both inclusive).
    /// abstract_level and bottom_level refer to the index keys in self.index (0 = lowest level, NR_LEVELS-1 = root).
    /// Returns indices in order from highest level (first in seq) to lowest level (last in seq).
    pub open spec fn rec_to_path(self, abstract_level: int, bottom_level: int) -> Seq<usize>
        decreases abstract_level - bottom_level when bottom_level <= abstract_level
    {
        if abstract_level < bottom_level {
            seq![]
        } else if abstract_level == bottom_level {
            // Base case: just this one level
            seq![self.index[abstract_level] as usize]
        } else {
            // Recursive case: get higher levels first, then push this level's index
            self.rec_to_path(abstract_level - 1, bottom_level).push(self.index[abstract_level] as usize)
        }
    }

    /// The vaddr of the path from this abstract vaddr equals the aligned vaddr at that level.
    pub proof fn to_path_vaddr(self, level: int)
        requires
            self.inv(),
            0 <= level < NR_LEVELS,
        ensures
            vaddr(self.to_path(level)) == self.align_down(level + 1).compute_vaddr(),
    {
        // NOTE(review): outstanding proof debt — discharged with admit().
        admit() // Structural induction on the recursive definitions
    }

    /// The concrete to_vaddr() equals the computed vaddr.
    /// (Trusted axiom: both are the same positional sum, folded in opposite directions.)
    pub axiom fn to_vaddr_is_compute_vaddr(self)
        requires
            self.inv(),
        ensures
            self.to_vaddr() == self.compute_vaddr();

    /// Incrementing index[level-1] by 1 increases to_vaddr() by page_size(level).
    pub proof fn index_increment_adds_page_size(self, level: int)
        requires
            self.inv(),
            1 <= level <= NR_LEVELS,
            self.index[level - 1] + 1 < NR_ENTRIES,
        ensures
            (Self {
                index: self.index.insert(level - 1, self.index[level - 1] + 1),
                ..self
            }).to_vaddr() == self.to_vaddr() + page_size(level as PagingLevel),
    {
        let new_va = Self {
            index: self.index.insert(level - 1, self.index[level - 1] + 1),
            ..self
        };
        // Establish new_va.inv()
        assert forall |i: int|
            #![trigger new_va.index.contains_key(i)]
            0 <= i < NR_LEVELS implies {
                &&& new_va.index.contains_key(i)
                &&& 0 <= new_va.index[i]
                &&& new_va.index[i] < NR_ENTRIES
            } by {
            // Use self.inv() to establish bounds on self.index[i]
            assert(self.index.contains_key(i));
            assert(0 <= self.index[i] < NR_ENTRIES);
            if i == level - 1 {
                assert(new_va.index[i] == self.index[i] + 1);
                assert(0 <= self.index[i]);
                assert(0 <= new_va.index[i]);
            } else {
                assert(new_va.index[i] == self.index[i]);
            }
        };
        assert(new_va.inv());
        self.to_vaddr_is_compute_vaddr();
        new_va.to_vaddr_is_compute_vaddr();
        // The compute_vaddr only differs at index[level-1], which contributes
        // an additional page_size(level) to the sum.
        // NOTE(review): outstanding proof debt — discharged with admit().
        admit()
    }

    /// Path extracted from abstract vaddr has correct length.
    pub proof fn to_path_len(self, level: int)
        requires
            0 <= level < NR_LEVELS,
        ensures
            self.to_path(level).len() == NR_LEVELS - level,
    {
        self.rec_to_path_len(NR_LEVELS - 1, level);
    }

    /// Helper: `rec_to_path(a, b)` has length `a - b + 1` when `b <= a`.
    proof fn rec_to_path_len(self, abstract_level: int, bottom_level: int)
        requires
            bottom_level <= abstract_level,
        ensures
            self.rec_to_path(abstract_level, bottom_level).len() == abstract_level - bottom_level + 1,
        decreases abstract_level - bottom_level,
    {
        // The recursive structure:
        // - rec_to_path(a, b) = rec_to_path(a-1, b).push(index[a]) when a >= b
        // - rec_to_path(a, b) = seq![] when a < b
        // So rec_to_path(a, b).len() = rec_to_path(a-1, b).len() + 1 = ... = a - b + 1
        if abstract_level > bottom_level {
            self.rec_to_path_len(abstract_level - 1, bottom_level);
        }
        // Structural reasoning about recursive definition
        // NOTE(review): outstanding proof debt — discharged with admit().
        admit()
    }

    /// Path extracted from abstract vaddr has valid indices.
    pub proof fn to_path_inv(self, level: int)
        requires
            self.inv(),
            0 <= level < NR_LEVELS,
        ensures
            self.to_path(level).inv(),
    {
        self.to_path_len(level);
        assert forall|i: int| 0 <= i < self.to_path(level).len()
        implies TreePath::<NR_ENTRIES>::elem_inv(#[trigger] self.to_path(level).index(i)) by {
            // Each path index comes from self.index which is bounded by NR_ENTRIES
            // NOTE(review): outstanding proof debt — discharged with admit().
            admit()
        };
    }
}
510
511/// Connection between TreePath's vaddr and AbstractVaddr
impl AbstractVaddr {
    /// If a TreePath matches this abstract vaddr's indices at all levels covered by the path,
    /// then vaddr(path) equals the aligned compute_vaddr at the corresponding level.
    pub proof fn path_matches_vaddr(self, path: TreePath<NR_ENTRIES>)
        requires
            self.inv(),
            path.inv(),
            path.len() <= NR_LEVELS,
            forall|i: int| 0 <= i < path.len() ==>
                path.index(i) == self.index[NR_LEVELS - 1 - i],
        ensures
            vaddr(path) == self.align_down((NR_LEVELS - path.len() + 1) as int).compute_vaddr()
                - self.align_down((NR_LEVELS - path.len() + 1) as int).offset,
    {
        // NOTE(review): outstanding proof debt — discharged with admit().
        admit() // Induction on path.len()
    }

    /// The path index at position i corresponds to the abstract vaddr index at level (NR_LEVELS - 1 - i).
    /// This is the key mapping between TreePath ordering and AbstractVaddr index ordering.
    pub proof fn to_path_index(self, level: int, i: int)
        requires
            self.inv(),
            0 <= level < NR_LEVELS,
            0 <= i < NR_LEVELS - level,
        ensures
            self.to_path(level).index(i) == self.index[NR_LEVELS - 1 - i],
    {
        self.to_path_len(level);
        // The path is built by rec_to_path(NR_LEVELS - 1, level)
        // which produces indices from level NR_LEVELS-1 down to level
        // in order: index[NR_LEVELS-1], index[NR_LEVELS-2], ..., index[level]
        // So path.index(i) = index[NR_LEVELS - 1 - i]
        // NOTE(review): outstanding proof debt — discharged with admit().
        admit()
    }

    /// Direct connection: vaddr(to_path(level)) equals the aligned concrete vaddr.
    /// This combines to_path_vaddr with to_vaddr_is_compute_vaddr.
    pub proof fn to_path_vaddr_concrete(self, level: int)
        requires
            self.inv(),
            0 <= level < NR_LEVELS,
        ensures
            vaddr(self.to_path(level)) == nat_align_down(self.to_vaddr() as nat, page_size((level + 1) as PagingLevel) as nat) as usize,
    {
        self.to_path_vaddr(level);
        self.to_vaddr_is_compute_vaddr();
        self.align_down_concrete(level + 1);
        // to_path_vaddr gives: vaddr(to_path(level)) == align_down(level+1).compute_vaddr()
        // to_vaddr_is_compute_vaddr gives: to_vaddr() == compute_vaddr()
        // align_down_concrete gives: align_down(level+1).reflect(nat_align_down(to_vaddr(), page_size(level+1)))
        // We need to connect align_down(level+1).compute_vaddr() to nat_align_down(to_vaddr(), page_size(level+1))
        // NOTE(review): outstanding proof debt — discharged with admit().
        admit()
    }

    /// Key property: if we have a path that matches cur_va's indices, then
    /// vaddr(path) + page_size(level) bounds the range containing cur_va.
    pub proof fn vaddr_range_from_path(self, level: int)
        requires
            self.inv(),
            0 <= level < NR_LEVELS,
        ensures
            vaddr(self.to_path(level)) <= self.to_vaddr()
                < vaddr(self.to_path(level)) + page_size((level + 1) as PagingLevel),
    {
        self.to_path_vaddr_concrete(level);
        // nat_align_down(x, n) <= x < nat_align_down(x, n) + n
        // NOTE(review): outstanding proof debt — discharged with admit().
        admit()
    }
}
581
582}