// ostd/specs/mm/page_table/cursor/page_table_cursor_specs.rs

1use vstd::prelude::*;
2
3use vstd::set_lib::*;
4
5use vstd_extra::ownership::*;
6use vstd_extra::prelude::TreePath;
7
8use crate::mm::page_prop::PageProperty;
9use crate::mm::page_table::*;
10use crate::mm::{Paddr, PagingConstsTrait, PagingLevel, Vaddr};
11use crate::specs::arch::mm::{NR_ENTRIES, NR_LEVELS, PAGE_SIZE};
12use crate::specs::arch::paging_consts::PagingConsts;
13use crate::specs::mm::page_table::cursor::owners::*;
14use crate::specs::mm::page_table::*;
15use vstd_extra::arithmetic::*;
16
17use core::ops::Range;
18
19verus! {
20
impl<C: PageTableConfig> PageTableOwner<C> {
    /// Produces a cursor owner from this page-table owner, together with the
    /// (possibly updated) owner itself. Uninterpreted: its meaning is given by
    /// axioms/lemmas elsewhere in the specs — TODO confirm where it is axiomatized.
    pub uninterp spec fn new_cursor_owner_spec<'rcu>(self) -> (Self, CursorOwner<'rcu, C>);
}
24
/// A `CursorView` is not aware that the underlying structure of the page table is a tree.
/// It treats the page table as an array of mappings of various sizes, and the cursor itself as the
/// current virtual address, moving from low to high addresses. These functions specify its behavior
/// and provide a simple interface for reasoning about its behavior.
impl<C: PageTableConfig> CursorView<C> {

    /// Converts an item mapped at `va` into the abstract `Mapping` it denotes:
    /// a VA range and PA range of `page_size(level)` bytes carrying the item's property.
    pub open spec fn item_into_mapping(va: Vaddr, item: C::Item) -> Mapping {
        let (paddr, level, prop) = C::item_into_raw_spec(item);
        let size = page_size(level);
        Mapping {
            va_range: va..(va + size) as usize,
            pa_range: paddr..(paddr + size) as Paddr,
            page_size: size,
            property: prop,
        }
    }

    /// This function checks if the current virtual address is mapped. It does not correspond
    /// to a cursor method itself, but defines what it means for an entry to present:
    /// there is a mapping whose virtual address range contains the current virtual address.
    pub open spec fn present(self) -> bool {
        self.mappings.filter(|m: Mapping| m.va_range.start <= self.cur_va < m.va_range.end).len() > 0
    }

    /// The mapping covering `cur_va`, chosen from the set of covering mappings.
    /// NOTE(review): presumably unique under `inv()` (non-overlapping mappings) — the
    /// disjointness fact is used by the proofs below; confirm `inv()`'s definition.
    pub open spec fn query_mapping(self) -> Mapping
        recommends
            self.present(),
    {
        self.mappings.filter(|m: Mapping| m.va_range.start <= self.cur_va < m.va_range.end).choose()
    }

    /// Whether the mapping at `cur_va` starts at physical address `paddr` and has the
    /// given `size` and property `prop`.
    pub open spec fn query(self, paddr: Paddr, size: usize, prop: PageProperty) -> bool {
        let m = self.query_mapping();
        m.pa_range.start == paddr && m.page_size == size && m.property == prop
    }

    /// The virtual address range of the mapping at `cur_va`.
    pub open spec fn query_range(self) -> Range<Vaddr> {
        self.query_mapping().va_range
    }

    /// The VA range of the current slot when mapping a page of the given size.
    /// Works for both present and absent mappings: when present, equals query_range() for
    /// a mapping of that size; when absent, returns the aligned range that would be mapped.
    pub open spec fn cur_slot_range(self, size: usize) -> Range<Vaddr> {
        let start = nat_align_down(self.cur_va as nat, size as nat) as Vaddr;
        start..(start as nat + size as nat) as Vaddr
    }

    /// This predicate specifies the behavior of the `query` method. It states that the current item
    /// in the page table matches the given item, mapped at the resulting virtual address range.
    pub open spec fn query_item_spec(self, item: C::Item) -> Option<Range<Vaddr>>
        recommends
            self.present(),
    {
        let (paddr, level, prop) = C::item_into_raw_spec(item);
        let size = page_size(level);
        if self.query(paddr, size, prop) {
            Some(self.query_range())
        } else {
            None
        }
    }

    /// The specification for the internal function, `find_next_impl`. It finds the next mapped virtual address
    /// that is at most `len` bytes away from the current virtual address. TODO: add the specifications for
    /// `find_unmap_subtree` and `split_huge`, which are used by other functions that call this one.
    /// This returns a mapping rather than the address because that is useful when it's called as a subroutine.
    pub open spec fn find_next_impl_spec(self, len: usize, find_unmap_subtree: bool, split_huge: bool) -> (Self, Option<Mapping>) {
        let mappings_in_range = self.mappings.filter(|m: Mapping| self.cur_va <= m.va_range.start < self.cur_va + len);

        if mappings_in_range.len() > 0 {
            // Found: advance the cursor past the end of the lowest mapping in range.
            let mapping = mappings_in_range.find_unique_minimal(|m: Mapping, n: Mapping| m.va_range.start < n.va_range.start);
            let view = CursorView {
                cur_va: mapping.va_range.end,
                ..self
            };
            (view, Some(mapping))
        } else {
            // Nothing found: the cursor skips the whole `len`-byte window.
            let view = CursorView {
                cur_va: (self.cur_va + len) as Vaddr,
                ..self
            };
            (view, None)
        }
    }

    /// Actual specification for `find_next`. The cursor finds the next mapped virtual address
    /// that is at most `len` bytes away from the current virtual address, returns it, and then
    /// moves the cursor forward to the next end of its range.
    pub open spec fn find_next_spec(self, len: usize) -> (Self, Option<Vaddr>) {
        let (cursor, mapping) = self.find_next_impl_spec(len, false, false);
        if mapping is Some {
            (cursor, Some(mapping.unwrap().va_range.start))
        } else {
            (cursor, None)
        }
    }

    /// Jump just sets the current virtual address to the given address.
    pub open spec fn jump_spec(self, va: usize) -> Self {
        CursorView {
            cur_va: va as Vaddr,
            ..self
        }
    }

    /// `cur_va` rounded up to the next multiple of `size`.
    pub open spec fn align_up_spec(self, size: usize) -> Vaddr {
        nat_align_up(self.cur_va as nat, size as nat) as Vaddr
    }

    /// The `n`-th sub-mapping of `m` when `m` is split into pages of `new_size` bytes:
    /// both the VA and PA ranges are offset by `n * new_size` and the property is kept.
    pub open spec fn split_index(m: Mapping, new_size: usize, n: usize) -> Mapping {
        Mapping {
            va_range: (m.va_range.start + n * new_size) as usize..(m.va_range.start + (n + 1) * new_size) as usize,
            pa_range: (m.pa_range.start + n * new_size) as usize..(m.pa_range.start + (n + 1) * new_size) as usize,
            page_size: new_size,
            property: m.property,
        }
    }

    /// After `split_if_mapped_huge_spec(new_size)`, a sub-mapping at `cur_va`
    /// still exists.  The witness is `split_index(m, new_size, k)` where
    /// `k = (cur_va - m.va_range.start) / new_size`.
    pub proof fn split_if_mapped_huge_spec_preserves_present(v: Self, new_size: usize)
        requires
            v.inv(),
            v.present(),
            new_size > 0,
            v.query_mapping().page_size > 0,
            v.query_mapping().page_size % new_size == 0,
        ensures
            v.split_if_mapped_huge_spec(new_size).present(),
    {
        let cur_va = v.cur_va;
        let m = v.query_mapping();
        let ps = m.page_size;

        // Step 1: m covers cur_va.
        // present() = filter.len() > 0 where filter = mappings.filter(pred).
        // query_mapping() = filter.choose().
        // From inv(): mappings.finite() ⇒ filter.finite().
        // axiom_set_choose_len: finite ∧ len ≠ 0 ⇒ contains(choose()).
        // So filter.contains(m), meaning m satisfies the predicate.
        //
        // Rather than reconstructing the filter, assert the consequence directly:
        // query_mapping() ∈ mappings and covers cur_va.
        assert(v.mappings.contains(m) && m.va_range.start <= cur_va && cur_va < m.va_range.end) by {
            // The filter set used by present()/query_mapping()
            let f = v.mappings.filter(|m2: Mapping| m2.va_range.start <= v.cur_va < m2.va_range.end);
            assert(f.finite()) by {
                vstd::set::axiom_set_intersect_finite::<Mapping>(
                    v.mappings,
                    Set::new(|m2: Mapping| m2.va_range.start <= v.cur_va < m2.va_range.end));
            };
            vstd::set::axiom_set_choose_len(f);
        };
        assert(m.inv());
        assert(m.va_range.start + ps == m.va_range.end);

        // Step 2: compute witness index k = (cur_va - m.va_range.start) / new_size.
        let diff: int = cur_va as int - m.va_range.start as int;
        let ki: int = diff / new_size as int;
        vstd::arithmetic::div_mod::lemma_fundamental_div_mod(diff, new_size as int);
        vstd::arithmetic::div_mod::lemma_mod_division_less_than_divisor(diff, new_size as int);
        vstd::arithmetic::div_mod::lemma_div_pos_is_pos(diff, new_size as int);
        // diff == ki * new_size + diff % new_size, 0 <= diff % new_size < new_size, ki >= 0.

        // Step 3: ki < ps / new_size (witness is in range).
        vstd::arithmetic::div_mod::lemma_fundamental_div_mod(ps as int, new_size as int);
        assert(ki < ps as int / new_size as int) by {
            if ki >= ps as int / new_size as int {
                vstd::arithmetic::mul::lemma_mul_inequality(
                    ps as int / new_size as int, ki, new_size as int);
            }
        };

        // Step 4: the sub-mapping covers cur_va.
        let sub = Self::split_index(m, new_size, ki as usize);
        // From fundamental_div_mod: ki * new_size <= diff < (ki + 1) * new_size.
        // sub.va_range = [m.start + ki*new_size, m.start + (ki+1)*new_size)
        // cur_va = m.start + diff, so sub covers cur_va.
        // Help the solver with the usize ↔ int connection:
        assert(ki * (new_size as int) >= 0) by {
            vstd::arithmetic::mul::lemma_mul_nonnegative(ki, new_size as int);
        };
        assert((ki + 1) * (new_size as int) <= ps as int) by {
            vstd::arithmetic::mul::lemma_mul_inequality(
                ki + 1, ps as int / new_size as int, new_size as int);
        };
        // Values stay within [0, m.va_range.end) ⊂ [0, MAX_USERSPACE_VADDR), so no usize overflow.
        // Connect sub's range to int arithmetic (no usize overflow since values < MAX_USERSPACE_VADDR).
        assert(m.va_range.start as int + (ki + 1) * (new_size as int)
            <= (m.va_range.end as int)) by {
            vstd::arithmetic::mul::lemma_mul_is_commutative(ki + 1, new_size as int);
            vstd::arithmetic::mul::lemma_mul_is_commutative(
                ps as int / new_size as int, new_size as int);
        };
        // The as-usize cast is a no-op: all values are in [0, MAX_USERSPACE_VADDR).
        assert(ki as usize as int == ki);
        vstd::arithmetic::mul::lemma_mul_is_distributive_add(new_size as int, ki, 1 as int);
        assert((cur_va as int) >= (m.va_range.start as int) + ki * (new_size as int));
        assert((cur_va as int) < (m.va_range.start as int) + (ki + 1) * (new_size as int));
        assert(sub.va_range.start <= cur_va);
        assert(cur_va < sub.va_range.end);

        // Step 5: sub ∈ new_self.mappings.
        // ki is in the domain of the map, so sub = split_index(m, new_size, ki) is in new_mappings.
        let new_self = v.split_if_mapped_huge_spec(new_size);
        let domain = Set::<int>::new(|n:int| 0 <= n < ps as int / new_size as int);
        assert(domain.contains(ki));
        assert(new_self.mappings.contains(sub));

        // Step 6: new_self.present().
        // sub is in new_self.mappings and covers cur_va, so it's in the filter.
        // The filter is finite (subset of new_self.mappings which is finite due to
        // finite original mappings + finite new_mappings).
        // finite + non-empty → len > 0 → present().
        let new_filter = new_self.mappings.filter(
            |m2: Mapping| m2.va_range.start <= new_self.cur_va < m2.va_range.end);
        assert(new_filter.contains(sub));
        assert(new_self.mappings.finite()) by {
            // v.mappings - {m} is finite (remove from finite).
            vstd::set::axiom_set_remove_finite(v.mappings, m);
            // new_mappings = domain.map(f) where domain = int::range_set(0, ps/new_size) is finite.
            let domain = Set::<int>::new(|n:int| 0 <= n < ps as int / new_size as int);
            assert(domain =~= int::range_set(0int, ps as int / new_size as int));
            vstd::set_lib::range_set_properties::<int>(0int, ps as int / new_size as int);
            domain.lemma_map_finite(|n:int| Self::split_index(m, new_size, n as usize));
            // (v.mappings - {m}) ∪ new_mappings: union of two finite sets.
            vstd::set::axiom_set_union_finite(
                v.mappings.remove(m),
                domain.map(|n:int| Self::split_index(m, new_size, n as usize)));
        };
        assert(new_filter.finite()) by {
            vstd::set::axiom_set_intersect_finite::<Mapping>(
                new_self.mappings,
                Set::new(|m2: Mapping| m2.va_range.start <= new_self.cur_va < m2.va_range.end));
        };
        vstd::set::axiom_set_contains_len(new_filter, sub);
    }

    /// After `split_if_mapped_huge_spec(new_size)` on a valid view, the
    /// mapping at `cur_va` has `page_size == new_size < m.page_size`.
    ///
    /// The sub-mapping `split_index(m, new_size, k)` has `page_size = new_size`.
    /// No other mapping from the original view covers `cur_va` (non-overlapping),
    /// so `query_mapping()` must return a sub-mapping with `page_size = new_size`.
    pub proof fn split_if_mapped_huge_spec_decreases_page_size(v: Self, new_size: usize)
        requires
            v.inv(),
            v.present(),
            new_size > 0,
            v.query_mapping().page_size > new_size,
            v.query_mapping().page_size % new_size == 0,
        ensures
            v.split_if_mapped_huge_spec(new_size).present(),
            v.split_if_mapped_huge_spec(new_size).query_mapping().page_size < v.query_mapping().page_size,
    {
        Self::split_if_mapped_huge_spec_preserves_present(v, new_size);

        let cur_va = v.cur_va;
        let m = v.query_mapping();
        let new_self = v.split_if_mapped_huge_spec(new_size);
        let m2 = new_self.query_mapping();
        let ps = m.page_size;

        // m covers cur_va (same chain as in preserves_present)
        assert(v.mappings.contains(m) && m.va_range.start <= cur_va && cur_va < m.va_range.end) by {
            let f = v.mappings.filter(
                |m2: Mapping| m2.va_range.start <= v.cur_va < m2.va_range.end);
            assert(f.finite()) by {
                vstd::set::axiom_set_intersect_finite::<Mapping>(
                    v.mappings,
                    Set::new(|m2: Mapping| m2.va_range.start <= v.cur_va < m2.va_range.end));
            };
            vstd::set::axiom_set_choose_len(f);
        };

        // m2 covers cur_va (from new_self.present() + choose)
        assert(new_self.mappings.contains(m2)
            && m2.va_range.start <= cur_va && cur_va < m2.va_range.end) by {
            let f = new_self.mappings.filter(
                |m3: Mapping| m3.va_range.start <= new_self.cur_va < m3.va_range.end);
            assert(new_self.mappings.finite()) by {
                vstd::set::axiom_set_remove_finite(v.mappings, m);
                let domain = Set::<int>::new(|n:int| 0 <= n < ps as int / new_size as int);
                assert(domain =~= int::range_set(0int, ps as int / new_size as int));
                vstd::set_lib::range_set_properties::<int>(0int, ps as int / new_size as int);
                domain.lemma_map_finite(|n:int| Self::split_index(m, new_size, n as usize));
                vstd::set::axiom_set_union_finite(
                    v.mappings.remove(m),
                    domain.map(|n:int| Self::split_index(m, new_size, n as usize)));
            };
            assert(f.finite()) by {
                vstd::set::axiom_set_intersect_finite::<Mapping>(
                    new_self.mappings,
                    Set::new(|m3: Mapping| m3.va_range.start <= new_self.cur_va < m3.va_range.end));
            };
            vstd::set::axiom_set_choose_len(f);
        };

        // m2 ∈ new_self.mappings = (v.mappings - {m}) ∪ new_mappings.
        // If m2 ∈ v.mappings - {m}: m2 ≠ m, m2 ∈ v.mappings, m2 covers cur_va.
        //   But m also covers cur_va and m ∈ v.mappings. From inv (non-overlapping): m == m2.
        //   Contradiction with m2 ≠ m.
        // So m2 ∈ new_mappings, hence m2.page_size == new_size < m.page_size.
        if v.mappings.contains(m2) && m2 != m {
            // Both m and m2 are in v.mappings and both cover cur_va. Non-overlapping ⇒ m == m2.
            assert(m.va_range.end <= m2.va_range.start || m2.va_range.end <= m.va_range.start);
            // But both cover cur_va: m.start <= cur_va < m.end AND m2.start <= cur_va < m2.end.
            // Contradiction with disjointness.
            assert(false);
        }
        // So m2 ∉ v.mappings or m2 == m.
        // m2 ∈ new_self.mappings = (v.mappings - {m}) ∪ new_mappings.
        // If m2 == m: m ∉ (v.mappings - {m}), so m2 ∈ new_mappings. m2.page_size = new_size. ✓
        // If m2 ∉ v.mappings: m2 ∉ (v.mappings - {m}), so m2 ∈ new_mappings. m2.page_size = new_size. ✓
        // Either way: m2.page_size == new_size < m.page_size.
        // m2 ∉ (v.mappings - {m}), so m2 ∈ new_mappings.
        // All elements of new_mappings have page_size == new_size.
        assert(!v.mappings.remove(m).contains(m2));
        let new_mappings = Set::<int>::new(
            |n:int| 0 <= n < ps as int / new_size as int
        ).map(|n:int| Self::split_index(m, new_size, n as usize));
        assert(new_mappings.contains(m2));
        let k = choose|k: int| 0 <= k < ps as int / new_size as int
            && #[trigger] Self::split_index(m, new_size, k as usize) == m2;
        assert(m2.page_size == new_size);
    }

    /// Replaces the mapping at `cur_va` with its `page_size / new_size` sub-mappings
    /// of `new_size` bytes each (via `split_index`); `cur_va` is unchanged.
    pub open spec fn split_if_mapped_huge_spec(self, new_size: usize) -> Self {
        let m = self.query_mapping();
        let size = m.page_size;
        let new_mappings = Set::<int>::new(|n:int| 0 <= n < size / new_size).map(|n:int| Self::split_index(m, new_size, n as usize));
        CursorView {
            cur_va: self.cur_va,
            mappings: self.mappings - set![m] + new_mappings,
            ..self
        }
    }

    /// Repeatedly splits the mapping at `cur_va`, one paging level at a time (dividing its
    /// page size by `NR_ENTRIES`), until the mapping's page size is at most `size` or
    /// `cur_va` is unmapped. The embedded `proof` block discharges the `decreases` check.
    pub open spec fn split_while_huge(self, size: usize) -> Self
        decreases self.query_mapping().page_size when self.inv()
    {
        if self.present() {
            let m = self.query_mapping();
            if m.page_size > size {
                let new_size = m.page_size / NR_ENTRIES;
                let new_self = self.split_if_mapped_huge_spec(new_size);
                proof {
                    // Establish that m is the chosen covering mapping and that the
                    // next page size divides evenly, for the two lemmas below.
                    let f = self.mappings.filter(|m2: Mapping| m2.va_range.start <= self.cur_va < m2.va_range.end);
                    vstd::set::axiom_set_intersect_finite::<Mapping>(
                        self.mappings, Set::new(|m2: Mapping| m2.va_range.start <= self.cur_va < m2.va_range.end));
                    vstd::set::axiom_set_choose_len(f);
                    assert(self.mappings.contains(m));
                    assert(m.inv());
                    assert(NR_ENTRIES == 512);
                    // Case split over the three x86-64 page sizes (4K / 2M / 1G).
                    assert(m.page_size % (m.page_size / 512usize) == 0) by {
                        if m.page_size == 4096 { assert(4096usize % (4096usize / 512usize) == 0); }
                        else if m.page_size == 2097152 { assert(2097152usize % (2097152usize / 512usize) == 0); }
                        else { assert(1073741824usize % (1073741824usize / 512usize) == 0); }
                    };
                    Self::split_if_mapped_huge_spec_preserves_present(self, new_size);
                    Self::split_if_mapped_huge_spec_decreases_page_size(self, new_size);
                }
                new_self.split_while_huge(size)
            } else {
                self
            }
        } else {
            self
        }
    }

    /// `split_while_huge` only modifies `mappings`, not `cur_va`.
    pub broadcast proof fn lemma_split_while_huge_preserves_cur_va(self, size: usize)
        requires self.inv(),
        ensures #[trigger] self.split_while_huge(size).cur_va == self.cur_va
        decreases self.query_mapping().page_size
    {
        if self.present() {
            let m = self.query_mapping();
            if m.page_size > size {
                let new_size = m.page_size / NR_ENTRIES;
                let new_self = self.split_if_mapped_huge_spec(new_size);
                assert(new_self.inv()) by { admit() }; // split_if_mapped_huge_spec preserves inv
                // Decreases: new_self.query_mapping().page_size < m.page_size
                let f = self.mappings.filter(|m2: Mapping| m2.va_range.start <= self.cur_va < m2.va_range.end);
                vstd::set::axiom_set_intersect_finite::<Mapping>(
                    self.mappings, Set::new(|m2: Mapping| m2.va_range.start <= self.cur_va < m2.va_range.end));
                vstd::set::axiom_set_choose_len(f);
                assert(m.inv());

                Self::split_if_mapped_huge_spec_decreases_page_size(self, new_size);
                Self::lemma_split_while_huge_preserves_cur_va(new_self, size);
            }
        }
    }

    /// The mappings left after removing every mapping whose VA start lies in
    /// `[cur_va, cur_va + size)` — i.e. the subtree that a `size`-byte mapping would replace.
    pub open spec fn remove_subtree(self, size: usize) -> Set<Mapping> {
        let subtree = self.mappings.filter(|m: Mapping|
            self.cur_va <= m.va_range.start < self.cur_va + size);
        self.mappings - subtree
    }

    /// Inserts a mapping into the cursor. If there were previously mappings there,
    /// they are removed. Note that multiple mappings might be removed if they overlap with
    /// a new large mapping.
    pub open spec fn map_spec(self, paddr: Paddr, size: usize, prop: PageProperty) -> Self {
        let new = Mapping {
            va_range: self.cur_slot_range(size),
            pa_range: paddr..(paddr + size) as usize,
            page_size: size,
            property: prop,
        };
        let split_self = self.split_while_huge(size);
        CursorView {
            cur_va: split_self.align_up_spec(size),
            mappings: split_self.remove_subtree(size) + set![new],
            ..split_self
        }
    }

    /// Like `map_spec`, but without splitting huge pages or removing overlapping
    /// mappings: simply adds the new mapping and leaves `cur_va` unchanged.
    pub open spec fn map_simple(self, paddr: Paddr, size: usize, prop: PageProperty) -> Self {
        let new = Mapping {
            va_range: self.cur_slot_range(size),
            pa_range: paddr..(paddr + size) as usize,
            page_size: size,
            property: prop,
        };
        CursorView {
            cur_va: self.cur_va,
            mappings: self.mappings + set![new],
            ..self
        }
    }

    /// Unmaps a range of virtual addresses from the current address up to `len` bytes.
    /// It returns the number of mappings that were removed.
    ///
    /// Because the implementation may split huge pages that straddle the range
    /// boundaries, the spec first applies `split_while_huge` at both `cur_va`
    /// and `cur_va + len` to obtain a "base" set of mappings where all boundary
    /// entries are at the finest granularity.  Mappings whose `va_range.start`
    /// falls in `[cur_va, cur_va + len)` are then removed from this base.
    pub open spec fn unmap_spec(self, len: usize, new_view: Self, num_unmapped: usize) -> bool {
        // Split the mapping at the start boundary
        let after_start_split = self.split_while_huge(PAGE_SIZE);
        // Split the mapping at the end boundary
        let at_end = CursorView { cur_va: (self.cur_va + len) as Vaddr, ..after_start_split };
        let after_both_splits = at_end.split_while_huge(PAGE_SIZE);
        let base = CursorView { cur_va: self.cur_va, ..after_both_splits };
        let taken = base.mappings.filter(|m: Mapping|
            self.cur_va <= m.va_range.start < self.cur_va + len);
            &&& new_view.cur_va >= (self.cur_va + len) as Vaddr
            &&& new_view.mappings == base.mappings - taken
            &&& num_unmapped == taken.len() as usize
    }

    /// Composition law for `split_while_huge`:
    /// splitting to a finer target `s2 <= s1` is the same as first splitting to `s1` and then
    /// further splitting to `s2`.  This holds because `split_while_huge(s1)` leaves the current
    /// mapping with `page_size <= s1`, so a subsequent `split_while_huge(s2)` (with `s2 <= s1`)
    /// produces the same result as applying `split_while_huge(s2)` directly.
    pub proof fn split_while_huge_compose(self, s1: usize, s2: usize)
        requires
            s2 <= s1,
        ensures
            self.split_while_huge(s2) == self.split_while_huge(s1).split_while_huge(s2),
    { admit() }

    /// When the current entry is absent or maps at `page_size <= size`, `split_while_huge(size)`
    /// is a no-op.  Applying a second call with the same `size` therefore returns the same value.
    pub proof fn split_while_huge_idempotent(self, size: usize)
        ensures
            self.split_while_huge(size).split_while_huge(size) == self.split_while_huge(size),
    {
        // Follows from split_while_huge_compose with s1 = s2 = size.
        self.split_while_huge_compose(size, size);
    }

    /// When `split_while_huge(size)` is a no-op and the view is `present()`,
    /// the mapping at `cur_va` already has `page_size <= size`.
    ///
    /// Follows from one step of unfolding: if `page_size > size`, the function
    /// would recurse and modify mappings, so it couldn't be a no-op.
    pub proof fn split_while_huge_noop_implies_page_size_le(self, size: usize)
        requires
            self.split_while_huge(size) == self,
            self.present(),
        ensures
            self.query_mapping().page_size <= size,
    {
        // From the definition: present() && page_size > size ⇒ recurse.
        // The recursion calls split_if_mapped_huge_spec which changes mappings
        // (removes the old mapping, adds sub-mappings).  So the result ≠ self.
        // Therefore page_size <= size.
        admit()
    }

    /// When the mapping at `cur_va` is exactly one split-step above `size`
    /// (i.e. `query_mapping().page_size / NR_ENTRIES == size`), one step of
    /// `split_while_huge` equals `split_if_mapped_huge_spec`:
    ///
    /// `self.split_while_huge(size) == self.split_if_mapped_huge_spec(size)`
    ///
    /// This is because `split_while_huge` takes one step
    /// `split_if_mapped_huge_spec(m.page_size / NR_ENTRIES)` = `split_if_mapped_huge_spec(size)`,
    /// then the sub-mapping at `cur_va` has `page_size == size <= size`, so it stops.
    pub proof fn split_while_huge_one_step(self, size: usize)
        requires
            self.present(),
            self.query_mapping().page_size > size,
            self.query_mapping().page_size / NR_ENTRIES == size,
        ensures
            self.split_while_huge(size).mappings
                =~= self.split_if_mapped_huge_spec(size).mappings,
    {
        // Unfold split_while_huge one step:
        // split_while_huge(size) = split_if_mapped_huge_spec(m.page_size / NR_ENTRIES).split_while_huge(size)
        //                        = split_if_mapped_huge_spec(size).split_while_huge(size)
        //
        // After split_if_mapped_huge_spec(size), the sub-mapping at cur_va has page_size == size.
        // split_while_huge(size) on this result: present, page_size <= size, returns self.
        // So: split_while_huge(size) == split_if_mapped_huge_spec(size).
        //
        // The admits in split_while_huge's decreases check prevent direct unfolding.
        admit()
    }

    /// Models `protect_next`: find the next mapping in range, split it to
    /// `target_page_size` if it is a huge page, then update its property via `op`.
    ///
    /// `target_page_size` corresponds to the cursor level after `find_next_impl`
    /// with `split_huge = true` — this is determined by the page table structure
    /// and cannot be derived from the abstract view alone.
    pub open spec fn protect_spec(self, len: usize, op: spec_fn(PageProperty) -> PageProperty, target_page_size: usize) -> (Self, Option<Range<Vaddr>>) {
        let (find_cursor, next) = self.find_next_impl_spec(len, false, true);
        if next is Some {
            let found = next.unwrap();
            // Position cursor at the found mapping and split to target size
            let at_found = CursorView {
                cur_va: found.va_range.start as Vaddr,
                ..self
            };
            let split_view = at_found.split_while_huge(target_page_size);
            // The mapping at cur_va in the split view is the one to protect
            let split_mapping = split_view.query_mapping();
            let new_mapping = Mapping {
                property: op(split_mapping.property),
                ..split_mapping
            };
            let new_cursor = CursorView {
                cur_va: split_mapping.va_range.end,
                mappings: split_view.mappings - set![split_mapping] + set![new_mapping],
                ..self
            };
            (new_cursor, Some(split_mapping.va_range))
        } else {
            (find_cursor, None)
        }
    }
}
587
588} // verus!