// Path: ostd/specs/mm/page_table/cursor/page_table_cursor_specs.rs

1use vstd::prelude::*;
2
3use vstd::set_lib::*;
4
5use vstd_extra::ownership::*;
6use vstd_extra::prelude::TreePath;
7
8use crate::mm::page_prop::PageProperty;
9use crate::mm::page_table::*;
10use crate::mm::{Paddr, PagingConstsTrait, PagingLevel, Vaddr};
11use crate::specs::arch::mm::{NR_ENTRIES, NR_LEVELS, PAGE_SIZE};
12use crate::specs::arch::paging_consts::PagingConsts;
13use crate::specs::mm::page_table::cursor::owners::*;
14use crate::specs::mm::page_table::*;
15use vstd_extra::arithmetic::*;
16
17use core::ops::Range;
18
19verus! {
20
impl<C: PageTableConfig> PageTableOwner<C> {
    /// Uninterpreted specification for creating a cursor owner from a page-table owner.
    /// Returns the (possibly updated) page-table owner together with the new `CursorOwner`.
    /// Being `uninterp`, its meaning is axiomatized elsewhere — presumably by lemmas in the
    /// `owners` module; confirm there before relying on any property of the result.
    pub uninterp spec fn new_cursor_owner_spec<'rcu>(self) -> (Self, CursorOwner<'rcu, C>);
}
24
/// A `CursorView` is not aware that the underlying structure of the page table is a tree.
/// It treats the page table as an array of mappings of various sizes, and the cursor itself as the
/// current virtual address, moving from low to high addresses. These functions specify its behavior
/// and provide a simple interface for reasoning about its behavior.
impl<C: PageTableConfig> CursorView<C> {

    /// Abstracts a page-table item installed at virtual address `va` into a `Mapping`:
    /// a one-page VA range and PA range at the item's level, carrying the item's property.
    pub open spec fn item_into_mapping(va: Vaddr, item: C::Item) -> Mapping {
        // Decompose the item into its raw (physical address, level, property) triple.
        let (paddr, level, prop) = C::item_into_raw_spec(item);
        let size = page_size(level);
        Mapping {
            va_range: va..(va + size) as usize,
            pa_range: paddr..(paddr + size) as Paddr,
            page_size: size,
            property: prop,
        }
    }

    /// This function checks if the current virtual address is mapped. It does not correspond
    /// to a cursor method itself, but defines what it means for an entry to present:
    /// there is a mapping whose virtual address range contains the current virtual address.
    pub open spec fn present(self) -> bool {
        self.mappings.filter(|m: Mapping| m.va_range.start <= self.cur_va < m.va_range.end).len() > 0
    }

    /// The mapping whose VA range covers the current virtual address.
    ///
    /// NOTE(review): this uses `choose()`, so the result is only well-defined (deterministic)
    /// when at most one mapping covers `cur_va` — presumably a non-overlap invariant
    /// maintained elsewhere; confirm against the cursor's well-formedness predicate.
    pub open spec fn query_mapping(self) -> Mapping
        recommends
            self.present(),
    {
        self.mappings.filter(|m: Mapping| m.va_range.start <= self.cur_va < m.va_range.end).choose()
    }

    /// Whether the mapping at the current virtual address starts at physical address `paddr`,
    /// spans `size` bytes, and carries property `prop`.
    pub open spec fn query(self, paddr: Paddr, size: usize, prop: PageProperty) -> bool {
        let m = self.query_mapping();
        m.pa_range.start == paddr && m.page_size == size && m.property == prop
    }

    /// The virtual address range of the mapping at the current virtual address.
    pub open spec fn query_range(self) -> Range<Vaddr> {
        self.query_mapping().va_range
    }

    /// The VA range of the current slot when mapping a page of the given size.
    /// Works for both present and absent mappings: when present, equals query_range() for
    /// a mapping of that size; when absent, returns the aligned range that would be mapped.
    /// (Only meaningful when `size` is a power-of-two page size — assumed, not checked here.)
    pub open spec fn cur_slot_range(self, size: usize) -> Range<Vaddr> {
        // Align the current VA down to a `size` boundary; the slot is the `size`-byte
        // window starting there.
        let start = nat_align_down(self.cur_va as nat, size as nat) as Vaddr;
        start..(start as nat + size as nat) as Vaddr
    }

    /// This predicate specifies the behavior of the `query` method. It states that the current item
    /// in the page table matches the given item, mapped at the resulting virtual address range.
    /// Returns `None` when the current mapping does not match the item's raw triple.
    pub open spec fn query_item_spec(self, item: C::Item) -> Option<Range<Vaddr>>
        recommends
            self.present(),
    {
        let (paddr, level, prop) = C::item_into_raw_spec(item);
        let size = page_size(level);
        if self.query(paddr, size, prop) {
            Some(self.query_range())
        } else {
            None
        }
    }

    /// The specification for the internal function, `find_next_impl`. It finds the next mapped virtual address
    /// that is at most `len` bytes away from the current virtual address. TODO: add the specifications for
    /// `find_unmap_subtree` and `split_huge`, which are used by other functions that call this one.
    /// This returns a mapping rather than the address because that is useful when it's called as a subroutine.
    ///
    /// NOTE(review): the `find_unmap_subtree` and `split_huge` parameters are currently unused
    /// in this spec (see the TODO above); callers pass them but they do not affect the result.
    pub open spec fn find_next_impl_spec(self, len: usize, find_unmap_subtree: bool, split_huge: bool) -> (Self, Option<Mapping>) {
        // All mappings that begin within the search window [cur_va, cur_va + len).
        let mappings_in_range = self.mappings.filter(|m: Mapping| self.cur_va <= m.va_range.start < self.cur_va + len);

        if mappings_in_range.len() > 0 {
            // The mapping with the smallest start address in the window; the cursor
            // advances past its end.
            let mapping = mappings_in_range.find_unique_minimal(|m: Mapping, n: Mapping| m.va_range.start < n.va_range.start);
            let view = CursorView {
                cur_va: mapping.va_range.end,
                ..self
            };
            (view, Some(mapping))
        } else {
            // Nothing mapped in the window: skip over the whole window.
            let view = CursorView {
                cur_va: (self.cur_va + len) as Vaddr,
                ..self
            };
            (view, None)
        }
    }

    /// Actual specification for `find_next`. The cursor finds the next mapped virtual address
    /// that is at most `len` bytes away from the current virtual address, returns it, and then
    /// moves the cursor forward to the next end of its range.
    pub open spec fn find_next_spec(self, len: usize) -> (Self, Option<Vaddr>) {
        let (cursor, mapping) = self.find_next_impl_spec(len, false, false);
        if mapping is Some {
            // Report the start of the found mapping; the cursor state already sits at its end.
            (cursor, Some(mapping.unwrap().va_range.start))
        } else {
            (cursor, None)
        }
    }

    /// Jump just sets the current virtual address to the given address.
    pub open spec fn jump_spec(self, va: usize) -> Self {
        CursorView {
            cur_va: va as Vaddr,
            ..self
        }
    }

    /// The current virtual address rounded up to the next multiple of `size`.
    pub open spec fn align_up_spec(self, size: usize) -> Vaddr {
        nat_align_up(self.cur_va as nat, size as nat) as Vaddr
    }

    /// The `n`-th sub-mapping obtained by splitting `m` into pages of `new_size` bytes:
    /// both the VA and PA ranges are shifted by `n * new_size` and narrowed to `new_size`.
    ///
    /// NOTE(review): the `pa_range` endpoints are cast `as usize` here, while
    /// `item_into_mapping` casts `as Paddr` — presumably `Paddr == usize` on this
    /// architecture; confirm, or make the casts consistent.
    pub open spec fn split_index(m: Mapping, new_size: usize, n: usize) -> Mapping {
        Mapping {
            va_range: (m.va_range.start + n * new_size) as usize..(m.va_range.start + (n + 1) * new_size) as usize,
            pa_range: (m.pa_range.start + n * new_size) as usize..(m.pa_range.start + (n + 1) * new_size) as usize,
            page_size: new_size,
            property: m.property,
        }
    }

    /// Replaces the mapping covering the current VA with `page_size / new_size` contiguous
    /// sub-mappings of `new_size` bytes each, leaving the cursor position unchanged.
    /// Only meaningful when `self.present()` holds and `new_size` divides the page size —
    /// presumably intended as `recommends` clauses; TODO confirm.
    pub open spec fn split_if_mapped_huge_spec(self, new_size: usize) -> Self {
        let m = self.query_mapping();
        let size = m.page_size;
        // Index set {0, .., size/new_size - 1} mapped to the corresponding sub-mappings.
        let new_mappings = Set::<int>::new(|n:int| 0 <= n < size / new_size).map(|n:int| Self::split_index(m, new_size, n as usize));
        CursorView {
            // `cur_va: self.cur_va` is redundant with `..self` but kept explicit.
            cur_va: self.cur_va,
            mappings: self.mappings - set![m] + new_mappings,
            ..self
        }
    }

    /// Repeatedly splits the mapping covering the current VA until its page size is at
    /// most `size` (or until the current VA is unmapped).
    ///
    /// NOTE(review): `m.page_size.ilog2()` yields the bit index of the page size
    /// (e.g. 4096 -> 12), not a smaller page size; a one-level split would presumably use
    /// something like `m.page_size / NR_ENTRIES` — TODO confirm intended step.
    /// Both `present()` preservation and the termination measure are currently admitted.
    pub open spec fn split_while_huge(self, size: usize) -> Self
        decreases self.query_mapping().page_size
    {
        if self.present() {
            let m = self.query_mapping();
            if m.page_size > size {
                let new_size = m.page_size.ilog2() as usize;
                let new_self = self.split_if_mapped_huge_spec(new_size);
                proof {
                    // Admitted: splitting keeps the current VA mapped and strictly
                    // shrinks the covering page size (justifies `decreases`).
                    assert(new_self.present()) by { admit() };
                    assert(new_self.query_mapping().page_size < m.page_size) by { admit() };
                }
                new_self.split_while_huge(size)
            } else {
                self
            }
        } else {
            self
        }
    }

    /// The mapping set after removing every mapping that *starts* within `size` bytes of
    /// the current VA. NOTE(review): filtering on `va_range.start` means a mapping that
    /// straddles `cur_va` from below is not removed — presumably ruled out by alignment
    /// invariants; confirm.
    pub open spec fn remove_subtree(self, size: usize) -> Set<Mapping> {
        let subtree = self.mappings.filter(|m: Mapping|
            self.cur_va <= m.va_range.start < self.cur_va + size);
        self.mappings - subtree
    }

    /// Inserts a mapping into the cursor. If there were previously mappings there,
    /// they are removed. Note that multiple mappings might be removed if they overlap with
    /// a new large mapping.
    pub open spec fn map_spec(self, paddr: Paddr, size: usize, prop: PageProperty) -> Self {
        // The new mapping occupies the `size`-aligned slot around the current VA.
        // (Computed from pre-split `self`, which is fine: splitting preserves `cur_va`.)
        let new = Mapping {
            va_range: self.cur_slot_range(size),
            pa_range: paddr..(paddr + size) as usize,
            page_size: size,
            property: prop,
        };
        // First split any huge page covering the slot, then drop the mappings in the
        // slot's window, insert the new one, and advance the cursor past the slot.
        let split_self = self.split_while_huge(size);
        CursorView {
            cur_va: split_self.align_up_spec(size),
            mappings: split_self.remove_subtree(size) + set![new],
            ..split_self
        }
    }

    /// Simplified mapping spec: adds a mapping for the current slot without splitting huge
    /// pages or removing overlapping mappings, and without advancing the cursor.
    pub open spec fn map_simple(self, paddr: Paddr, size: usize, prop: PageProperty) -> Self {
        let new = Mapping {
            va_range: self.cur_slot_range(size),
            pa_range: paddr..(paddr + size) as usize,
            page_size: size,
            property: prop,
        };
        CursorView {
            // `cur_va: self.cur_va` is redundant with `..self` but kept explicit.
            cur_va: self.cur_va,
            mappings: self.mappings + set![new],
            ..self
        }
    }

    /// Unmaps a range of virtual addresses from the current address up to `len` bytes.
    /// It returns the number of mappings that were removed.
    /// (Unlike the functional specs above, this is a relational predicate over the
    /// resulting view and the reported count.)
    pub open spec fn unmap_spec(self, len: usize, new_view: Self, num_unmapped: usize) -> bool {
        // Mappings that begin within [cur_va, cur_va + len) are the ones taken out.
        let taken = self.mappings.filter(|m: Mapping|
            self.cur_va <= m.va_range.start < self.cur_va + len);
            &&& new_view.cur_va >= (self.cur_va + len) as Vaddr
            &&& new_view.mappings == self.mappings - taken
            &&& num_unmapped == taken.len() as usize
    }

    /// Specification for `protect`: advance to the next mapping within `len` bytes and
    /// return its VA range. NOTE(review): the property transformer `op` is currently
    /// unused — the property change itself is not yet modeled (see TODO below).
    pub open spec fn protect_spec(self, len: usize, op: impl Fn(PageProperty) -> PageProperty) -> (Self, Option<Range<Vaddr>>) {
        let (cursor, next) = self.find_next_impl_spec(len, false, true);
        if next is Some {
            // TODO: Model props in here
            (cursor, Some(next.unwrap().va_range))
        } else {
            (cursor, None)
        }
    }
}
235
236} // verus!