ostd/specs/mm/page_table/cursor/cursor_steps.rs

use vstd::prelude::*;

use vstd_extra::ownership::*;
use vstd_extra::prelude::TreePath;

use crate::mm::page_table::*;
use crate::mm::{Paddr, PagingConstsTrait, PagingLevel, Vaddr};
use crate::specs::arch::mm::{NR_ENTRIES, NR_LEVELS, PAGE_SIZE};
use crate::specs::arch::paging_consts::PagingConsts;
use crate::specs::mm::MetaRegionOwners;
use crate::specs::mm::page_table::cursor::owners::*;
use crate::specs::mm::page_table::node::GuardPerm;
use crate::specs::mm::Guards;

use core::ops::Range;

verus! {

impl<'rcu, C: PageTableConfig> CursorOwner<'rcu, C> {

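    /// Upper bound on the number of cursor steps needed to exhaust a whole
    /// subtree rooted at `level`: each of the `NR_ENTRIES()` slots costs at
    /// most one step plus a full traversal of the child subtree beneath it.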
    pub open spec fn max_steps_subtree(level: usize) -> usize
        decreases level,
    {
        if level <= 1 {
            NR_ENTRIES()
        } else {
            (NR_ENTRIES() * (Self::max_steps_subtree((level - 1) as usize) + 1)) as usize
        }
    }

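    /// Upper bound on the steps remaining from the cursor's position at `level`
    /// upwards: the unvisited slots of the continuation at this level, each
    /// bounded by a full subtree traversal, plus whatever remains at the levels above.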
    pub open spec fn max_steps_partial(self, level: usize) -> usize
        decreases NR_LEVELS() - level,
        when level <= NR_LEVELS()
    {
        if level == NR_LEVELS() {
            0
        } else {
            let cont = self.continuations[(level - 1) as int];
            let steps = Self::max_steps_subtree(level) * (NR_ENTRIES() - cont.idx);
            let remaining_steps = self.max_steps_partial((level + 1) as usize);
            (steps + remaining_steps) as usize
        }
    }

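    /// Total step bound for the cursor starting from its current level.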
    pub open spec fn max_steps(self) -> usize {
        self.max_steps_partial(self.level as usize)
    }

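    /// Congruence lemma: the step bound at `level` depends only on the
    /// continuation indices from the cursor's level upwards, so two owners
    /// that agree on those indices have the same bound.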
    pub proof fn max_steps_partial_inv(self, other: Self, level: usize)
        requires
            self.inv(),
            other.inv(),
            self.level == other.level,
            self.level <= level <= NR_LEVELS(),
            forall |i: int|
                #![trigger self.continuations[i].idx]
                #![trigger other.continuations[i].idx]
                self.level - 1 <= i < NR_LEVELS() ==> self.continuations[i].idx == other.continuations[i].idx,
        ensures
            self.max_steps_partial(level) == other.max_steps_partial(level),
        decreases NR_LEVELS() - level,
    {
        if level < NR_LEVELS() {
            self.max_steps_partial_inv(other, (level + 1) as usize);
        }
    }

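    /// Spec of descending one level: the continuation at the current level
    /// yields the child selected by `self.va`, that child becomes the new
    /// innermost continuation, and the level drops by one.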
    pub open spec fn push_level_owner_spec(self, guard_perm: GuardPerm<'rcu, C>) -> Self
    {
        let cont = self.continuations[self.level - 1];
        let (child, cont) = cont.make_cont_spec(self.va.index[self.level - 2] as usize, guard_perm);
        let new_continuations = self.continuations.insert(self.level - 1, cont);
        let new_continuations = new_continuations.insert(self.level - 2, child);

        let new_level = (self.level - 1) as u8;
        Self {
            continuations: new_continuations,
            level: new_level,
            popped_too_high: false,
            ..self
        }
    }

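    /// Descending one level strictly decreases `max_steps`.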
    pub proof fn push_level_owner_decreases_steps(self, guard_perm: GuardPerm<'rcu, C>)
        requires
            self.inv(),
            self.level > 0,
        ensures
            self.push_level_owner_spec(guard_perm).max_steps() < self.max_steps()
    { admit() }

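    /// Descending leaves the cursor's virtual address unchanged, and the new
    /// child continuation starts at the index selected by that address.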
    pub proof fn push_level_owner_preserves_va(self, guard_perm: GuardPerm<'rcu, C>)
        requires
            self.inv(),
            self.level > 1,
        ensures
            self.push_level_owner_spec(guard_perm).va == self.va,
            self.push_level_owner_spec(guard_perm).continuations[self.level - 2].idx == self.va.index[self.level - 2],
    {
        assert(self.va.index.contains_key(self.level - 2));
    }

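    /// Descending preserves the owner invariant and the relations to the
    /// guard (lock) state and the metadata regions.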
    pub proof fn push_level_owner_preserves_invs(self, guard_perm: GuardPerm<'rcu, C>, regions: MetaRegionOwners, guards: Guards<'rcu, C>)
        requires
            self.inv(),
            self.level > 1,
            self.only_current_locked(guards),
            self.nodes_locked(guards),
            self.relate_region(regions),
        ensures
            self.push_level_owner_spec(guard_perm).inv(),
            self.push_level_owner_spec(guard_perm).children_not_locked(guards),
            self.push_level_owner_spec(guard_perm).nodes_locked(guards),
            self.push_level_owner_spec(guard_perm).relate_region(regions),
    { admit() }

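    /// Tracked counterpart of `push_level_owner_spec`: consumes the guard
    /// permission for the child node and rewires the continuation map so the
    /// cursor owns the child one level down.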
    #[verifier::returns(proof)]
    pub proof fn push_level_owner(tracked &mut self, tracked guard_perm: Tracked<GuardPerm<'rcu, C>>)
        requires
            old(self).inv(),
            old(self).level > 1,
        ensures
            *self == old(self).push_level_owner_spec(guard_perm@),
    {
        assert(self.va.index.contains_key(self.level - 2));

        let ghost self0 = *self;
        let tracked mut cont = self.continuations.tracked_remove(self.level - 1);
        let ghost cont0 = cont;
        let tracked child = cont.make_cont(self.va.index[self.level - 2] as usize, guard_perm);

        assert((child, cont) == cont0.make_cont_spec(self.va.index[self.level - 2] as usize, guard_perm@));

        self.continuations.tracked_insert(self.level - 1, cont);
        self.continuations.tracked_insert(self.level - 2, child);

        assert(self.continuations == self0.continuations.insert(self.level - 1, cont).insert(self.level - 2, child));

        self.popped_too_high = false;

        self.level = (self.level - 1) as u8;
    }

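    /// Spec of ascending one level: the innermost continuation is folded back
    /// into its parent, the guard permission for the child node is returned,
    /// and `popped_too_high` records whether the cursor rose to or above
    /// `guard_level`.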
    pub open spec fn pop_level_owner_spec(self) -> (Self, GuardPerm<'rcu, C>)
    {
        let child = self.continuations[self.level - 1];
        let cont = self.continuations[self.level as int];
        let (new_cont, guard_perm) = cont.restore_spec(child);
        let new_continuations = self.continuations.insert(self.level as int, new_cont);
        let new_continuations = new_continuations.remove(self.level - 1);
        let new_level = (self.level + 1) as u8;
        let popped_too_high = if new_level >= self.guard_level { true } else { false };
        (Self {
            continuations: new_continuations,
            level: new_level,
            popped_too_high: popped_too_high,
            ..self
        }, guard_perm)
    }

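    /// Ascending preserves the owner invariant: the only entry the restored
    /// parent gains is the child being folded back, whose recorded parent
    /// level matches by construction.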
    pub proof fn pop_level_owner_preserves_inv(self)
        requires
            self.inv(),
            self.level < NR_LEVELS(),
            self.in_locked_range(),
        ensures
            self.pop_level_owner_spec().0.inv(),
    {
        let child = self.continuations[self.level - 1];
        let cont = self.continuations[self.level as int];
        let (new_cont, _) = cont.restore_spec(child);
        assert forall |i: int| #![auto] 0 <= i < NR_ENTRIES() && new_cont.children[i] is Some implies
            new_cont.children[i].unwrap().value.parent_level == new_cont.level() by {
            if i == cont.idx {
                assert(child.entry_own.parent_level == cont.level())
            }
        }
        assert(new_cont.inv());
    }

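    /// Ascending also preserves the relations to the guard state and the
    /// metadata regions.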
    pub proof fn pop_level_owner_preserves_invs(self, guards: Guards<'rcu, C>, regions: MetaRegionOwners)
        requires
            self.inv(),
            self.level < NR_LEVELS(),
            self.children_not_locked(guards),
            self.nodes_locked(guards),
            self.relate_region(regions),
        ensures
            self.pop_level_owner_spec().0.inv(),
            self.pop_level_owner_spec().0.children_not_locked(guards),
            self.pop_level_owner_spec().0.nodes_locked(guards),
            self.pop_level_owner_spec().0.relate_region(regions),
    { admit() }

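    /// Tracked counterpart of `pop_level_owner_spec`: restores the parent
    /// continuation and returns the guard permission held for the child node.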
    #[verifier::returns(proof)]
    pub proof fn pop_level_owner(tracked &mut self) -> (tracked guard_perm: GuardPerm<'rcu, C>)
        requires
            old(self).inv(),
            old(self).level < NR_LEVELS(),
        ensures
            *self == old(self).pop_level_owner_spec().0,
            guard_perm == old(self).pop_level_owner_spec().1,
    {
        let ghost self0 = *self;
        let tracked mut parent = self.continuations.tracked_remove(self.level as int);
        let tracked child = self.continuations.tracked_remove(self.level - 1);

        let tracked guard_perm = parent.restore(child);

        self.continuations.tracked_insert(self.level as int, parent);

        assert(self.continuations == self0.continuations.insert(self.level as int, parent).remove(self.level - 1));

        self.level = (self.level + 1) as u8;

        if self.level >= self.guard_level {
            self.popped_too_high = true;
        }

        guard_perm
    }

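    /// Spec of advancing the cursor by one slot: bump the index if the current
    /// node still has room, otherwise pop levels until a node with room is
    /// reached (or the root level is hit).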
    pub open spec fn move_forward_owner_spec(self) -> Self
        recommends
            self.inv(),
            self.level < NR_LEVELS(),
            self.in_locked_range(),
        decreases NR_LEVELS() - self.level when self.level <= NR_LEVELS()
    {
        if self.index() + 1 < NR_ENTRIES() {
            self.inc_index()
        } else if self.level < NR_LEVELS() {
            self.pop_level_owner_spec().0.move_forward_owner_spec()
        } else {
            Self {
                popped_too_high: false,
                ..self
            }
        }
    }

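    /// Advancing the cursor strictly increases the virtual address it points at.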
    pub proof fn move_forward_increases_va(self)
        requires
            self.inv(),
            self.level <= NR_LEVELS(),
            self.in_locked_range(),
        ensures
            self.move_forward_owner_spec().va.to_vaddr() > self.va.to_vaddr(),
        decreases NR_LEVELS() - self.level
    {
        if self.index() + 1 < NR_ENTRIES() {
            self.inc_index_increases_va();
        } else if self.level < NR_LEVELS() {
            self.pop_level_owner_preserves_inv();
            self.pop_level_owner_spec().0.move_forward_increases_va();
        } else {
            admit();
        }
    }

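    /// Advancing the cursor never leaves it in the `popped_too_high` state.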
    pub proof fn move_forward_not_popped_too_high(self)
        requires
            self.inv(),
            self.level <= NR_LEVELS(),
            self.in_locked_range(),
        ensures
            !self.move_forward_owner_spec().popped_too_high,
        decreases NR_LEVELS() - self.level,
    {
        if self.level < NR_LEVELS() {
            self.pop_level_owner_preserves_inv();
            self.pop_level_owner_spec().0.move_forward_not_popped_too_high();
        }
    }

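    /// Advancing the cursor strictly decreases `max_steps`.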
    pub proof fn move_forward_owner_decreases_steps(self)
        requires
            self.inv(),
            self.level <= NR_LEVELS(),
        ensures
            self.move_forward_owner_spec().max_steps() < self.max_steps()
    { admit() }

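    /// Advancing the cursor preserves the relations to the guard state and the
    /// metadata regions; the popping case reduces to the pop-level lemmas.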
    pub proof fn move_forward_preserves_invs(self, guards: Guards<'rcu, C>, regions: MetaRegionOwners)
        requires
            self.inv(),
            self.level <= NR_LEVELS(),
            self.in_locked_range(),
            self.children_not_locked(guards),
            self.nodes_locked(guards),
            self.relate_region(regions),
        ensures
            self.move_forward_owner_spec().children_not_locked(guards),
            self.move_forward_owner_spec().nodes_locked(guards),
            self.move_forward_owner_spec().relate_region(regions),
        decreases NR_LEVELS() - self.level,
    {
        if self.index() + 1 < NR_ENTRIES() {

        } else if self.level < NR_LEVELS() {
            self.pop_level_owner_preserves_invs(guards, regions);
            let popped = self.pop_level_owner_spec().0;
            popped.move_forward_preserves_invs(guards, regions);
        }
    }
}

}