#![allow(hidden_glob_reexports)]

pub mod cursor;
pub mod mapping_set_lemmas;
pub mod node;
mod owners;
mod view;

pub use cursor::*;
pub use node::*;
pub use owners::*;
pub use view::*;

use vstd::arithmetic::power2::pow2;
use vstd::prelude::*;
use vstd_extra::arithmetic::*;
use vstd_extra::ghost_tree::TreePath;
use vstd_extra::ownership::*;

use crate::mm::page_table::page_size;
use crate::mm::page_table::PageTableConfig;
use crate::mm::{PagingLevel, Vaddr};
use crate::specs::arch::mm::{NR_ENTRIES, NR_LEVELS, PAGE_SIZE};

use align_ext::AlignExt;

verus! {

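/// An abstract view of a virtual address, decomposed into its page offset,
/// one page-table index per level (level 0 is the lowest), and the leading
/// bits above the 48-bit translated range.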
pub struct AbstractVaddr {
    pub offset: int,
    pub index: Map<int, int>,
    pub leading_bits: int,
}

impl Inv for AbstractVaddr {
    open spec fn inv(self) -> bool {
        &&& 0 <= self.offset < PAGE_SIZE
        &&& self.index.dom() =~= Set::new(|i: int| 0 <= i < NR_LEVELS)
        &&& forall|i: int|
            #![trigger self.index.contains_key(i)]
            0 <= i < NR_LEVELS ==> {
                &&& self.index.contains_key(i)
                &&& 0 <= self.index[i] < NR_ENTRIES
            }
        &&& 0 <= self.leading_bits < 0x1_0000int
    }
}

impl AbstractVaddr {
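    /// Decomposes a concrete `Vaddr` into its abstract form: the page offset,
    /// the 9-bit index at each level, and everything above bit 48.
    /// For example, assuming the 4-level, 512-entry layout used here,
    /// `from_vaddr(0x8000_1234)` yields offset `0x234`,
    /// indices `{0: 1, 1: 0, 2: 2, 3: 0}`, and `leading_bits == 0`.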
    pub open spec fn from_vaddr(va: Vaddr) -> Self {
        AbstractVaddr {
            offset: (va % PAGE_SIZE) as int,
            index: Map::new(
                |i: int| 0 <= i < NR_LEVELS,
                |i: int| ((va / pow2((12 + 9 * i) as nat) as usize) % NR_ENTRIES) as int,
            ),
            leading_bits: (va as int / 0x1_0000_0000_0000int),
        }
    }

    pub proof fn from_vaddr_wf(va: Vaddr)
        ensures
            AbstractVaddr::from_vaddr(va).inv(),
    {
        let abs = AbstractVaddr::from_vaddr(va);
        assert forall|i: int| #![trigger abs.index.contains_key(i)] 0 <= i < NR_LEVELS implies {
            &&& abs.index.contains_key(i)
            &&& 0 <= abs.index[i]
            &&& abs.index[i] < NR_ENTRIES
        } by {};
        let va_i = va as int;
        assert(0 <= abs.leading_bits < 0x1_0000int) by (nonlinear_arith)
            requires
                abs.leading_bits == va_i / 0x1_0000_0000_0000int,
                0 <= va_i < 0x1_0000_0000_0000_0000int;
    }

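    /// Recombines the abstract parts into a concrete `Vaddr`: the offset,
    /// plus the weighted sum of the indices, plus the leading bits shifted
    /// above bit 48.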
    pub open spec fn to_vaddr(self) -> Vaddr {
        (self.offset + self.to_vaddr_indices(0)
            + self.leading_bits * 0x1_0000_0000_0000int) as Vaddr
    }

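    /// Sums `index[i] * 2^(12 + 9 * i)` over all levels `i >= start`.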
    pub open spec fn to_vaddr_indices(self, start: int) -> int
        decreases NR_LEVELS - start,
        when start <= NR_LEVELS
    {
        if start >= NR_LEVELS {
            0
        } else {
            self.index[start] * pow2((12 + 9 * start) as nat) as int
                + self.to_vaddr_indices(start + 1)
        }
    }

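    /// Holds when `self` is exactly the abstraction of the concrete address `va`.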
    pub open spec fn reflect(self, va: Vaddr) -> bool {
        self == Self::from_vaddr(va)
    }

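    /// A reflected pair converts both ways: `to_vaddr` recovers `va`, and
    /// `from_vaddr(va)` recovers `self`.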
    pub broadcast proof fn reflect_prop(self, va: Vaddr)
        requires
            self.inv(),
            self.reflect(va),
        ensures
            #[trigger] self.to_vaddr() == va,
            #[trigger] Self::from_vaddr(va) == self,
    {
        Self::from_vaddr_to_vaddr_roundtrip(va);
    }

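    /// Round trip from the concrete side: abstracting `va` and converting
    /// back yields `va` again. The proof expands the per-level sums and
    /// discharges the resulting div/mod identity with the bit-vector solver.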
    pub proof fn from_vaddr_to_vaddr_roundtrip(va: Vaddr)
        ensures
            Self::from_vaddr(va).to_vaddr() == va,
    {
        vstd::arithmetic::power2::lemma2_to64();
        vstd::arithmetic::power2::lemma2_to64_rest();
        let abs = Self::from_vaddr(va);
        assert(abs.to_vaddr_indices(4) == 0);
        assert(abs.to_vaddr_indices(3)
            == abs.index[3] * pow2(39nat) as int + abs.to_vaddr_indices(4));
        assert(abs.to_vaddr_indices(2)
            == abs.index[2] * pow2(30nat) as int + abs.to_vaddr_indices(3));
        assert(abs.to_vaddr_indices(1)
            == abs.index[1] * pow2(21nat) as int + abs.to_vaddr_indices(2));
        assert(abs.to_vaddr_indices(0)
            == abs.index[0] * pow2(12nat) as int + abs.to_vaddr_indices(1));
        assert(
            va == (va % 4096usize)
                + ((va / 4096usize) % 512usize) * 4096usize
                + ((va / 0x20_0000usize) % 512usize) * 0x20_0000usize
                + ((va / 0x4000_0000usize) % 512usize) * 0x4000_0000usize
                + ((va / 0x80_0000_0000usize) % 512usize) * 0x80_0000_0000usize
                + (va / 0x1_0000_0000_0000usize) * 0x1_0000_0000_0000usize
        ) by (bit_vector);
    }

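    /// `from_vaddr` always produces a well-formed abstraction that reflects
    /// its input.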
    pub broadcast proof fn reflect_from_vaddr(va: Vaddr)
        ensures
            #[trigger] Self::from_vaddr(va).reflect(va),
            #[trigger] Self::from_vaddr(va).inv(),
    {
        Self::from_vaddr_wf(va);
    }

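    /// A well-formed abstraction reflects its own concretization.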
    pub broadcast proof fn reflect_to_vaddr(self)
        requires
            self.inv(),
        ensures
            #[trigger] self.reflect(self.to_vaddr()),
    {
        Self::to_vaddr_from_vaddr_roundtrip(self);
    }

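    /// Round trip from the abstract side: concretizing a well-formed
    /// abstraction and re-abstracting yields the same value. Each field is
    /// recovered by a bit-vector div/mod argument, and the index maps are
    /// then shown extensionally equal.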
    pub proof fn to_vaddr_from_vaddr_roundtrip(abs: Self)
        requires
            abs.inv(),
        ensures
            Self::from_vaddr(abs.to_vaddr()) == abs,
    {
        vstd::arithmetic::power2::lemma2_to64();
        vstd::arithmetic::power2::lemma2_to64_rest();
        abs.to_vaddr_bounded();
        assert(abs.to_vaddr_indices(4) == 0);
        assert(abs.to_vaddr_indices(3)
            == abs.index[3] * pow2(39nat) as int + abs.to_vaddr_indices(4));
        assert(abs.to_vaddr_indices(2)
            == abs.index[2] * pow2(30nat) as int + abs.to_vaddr_indices(3));
        assert(abs.to_vaddr_indices(1)
            == abs.index[1] * pow2(21nat) as int + abs.to_vaddr_indices(2));
        assert(abs.to_vaddr_indices(0)
            == abs.index[0] * pow2(12nat) as int + abs.to_vaddr_indices(1));

        assert(abs.index.contains_key(0));
        assert(abs.index.contains_key(1));
        assert(abs.index.contains_key(2));
        assert(abs.index.contains_key(3));
        let i0 = abs.index[0] as usize;
        let i1 = abs.index[1] as usize;
        let i2 = abs.index[2] as usize;
        let i3 = abs.index[3] as usize;
        let o = abs.offset as usize;
        let tb = abs.leading_bits as usize;
        let va = abs.to_vaddr();
        assert(i0 < 512usize);
        assert(i1 < 512usize);
        assert(i2 < 512usize);
        assert(i3 < 512usize);
        assert(
            va == o
                + i0 * 4096usize
                + i1 * 0x20_0000usize
                + i2 * 0x4000_0000usize
                + i3 * 0x80_0000_0000usize
                + tb * 0x1_0000_0000_0000usize
        );

        assert(va % 4096usize == o) by (bit_vector)
            requires
                va == o + i0 * 4096usize + i1 * 0x20_0000usize + i2 * 0x4000_0000usize
                    + i3 * 0x80_0000_0000usize + tb * 0x1_0000_0000_0000usize,
                o < 4096usize, i0 < 512usize, i1 < 512usize, i2 < 512usize,
                i3 < 512usize, tb < 0x1_0000usize;
        assert((va / 4096usize) % 512usize == i0) by (bit_vector)
            requires
                va == o + i0 * 4096usize + i1 * 0x20_0000usize + i2 * 0x4000_0000usize
                    + i3 * 0x80_0000_0000usize + tb * 0x1_0000_0000_0000usize,
                o < 4096usize, i0 < 512usize, i1 < 512usize, i2 < 512usize,
                i3 < 512usize, tb < 0x1_0000usize;
        assert((va / 0x20_0000usize) % 512usize == i1) by (bit_vector)
            requires
                va == o + i0 * 4096usize + i1 * 0x20_0000usize + i2 * 0x4000_0000usize
                    + i3 * 0x80_0000_0000usize + tb * 0x1_0000_0000_0000usize,
                o < 4096usize, i0 < 512usize, i1 < 512usize, i2 < 512usize,
                i3 < 512usize, tb < 0x1_0000usize;
        assert((va / 0x4000_0000usize) % 512usize == i2) by (bit_vector)
            requires
                va == o + i0 * 4096usize + i1 * 0x20_0000usize + i2 * 0x4000_0000usize
                    + i3 * 0x80_0000_0000usize + tb * 0x1_0000_0000_0000usize,
                o < 4096usize, i0 < 512usize, i1 < 512usize, i2 < 512usize,
                i3 < 512usize, tb < 0x1_0000usize;
        assert((va / 0x80_0000_0000usize) % 512usize == i3) by (bit_vector)
            requires
                va == o + i0 * 4096usize + i1 * 0x20_0000usize + i2 * 0x4000_0000usize
                    + i3 * 0x80_0000_0000usize + tb * 0x1_0000_0000_0000usize,
                o < 4096usize, i0 < 512usize, i1 < 512usize, i2 < 512usize,
                i3 < 512usize, tb < 0x1_0000usize;
        assert(va / 0x1_0000_0000_0000usize == tb) by (bit_vector)
            requires
                va == o + i0 * 4096usize + i1 * 0x20_0000usize + i2 * 0x4000_0000usize
                    + i3 * 0x80_0000_0000usize + tb * 0x1_0000_0000_0000usize,
                o < 4096usize, i0 < 512usize, i1 < 512usize, i2 < 512usize,
                i3 < 512usize, tb < 0x1_0000usize;

        let back = Self::from_vaddr(va);
        assert forall|i: int| 0 <= i < NR_LEVELS
            implies #[trigger] back.index[i] == abs.index[i] by {
            if i == 0 {} else if i == 1 {} else if i == 2 {} else if i == 3 {}
        }
        assert(back.index =~= abs.index);
    }

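    /// Reflection is functional: two abstractions of the same address are equal.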
    pub broadcast proof fn reflect_eq(self, other: Self, va: Vaddr)
        requires
            #[trigger] self.reflect(va),
            #[trigger] other.reflect(va),
        ensures
            self == other,
    {
    }

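    /// Aligns the address down to a `level`-sized boundary by zeroing the
    /// offset and every index below `level - 1`; indices at `level - 1` and
    /// above are untouched.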
    pub open spec fn align_down(self, level: int) -> Self
        decreases level,
        when level >= 1
    {
        if level == 1 {
            AbstractVaddr { offset: 0, ..self }
        } else {
            let tmp = self.align_down(level - 1);
            AbstractVaddr { index: tmp.index.insert(level - 2, 0), ..tmp }
        }
    }

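    /// Aligning down preserves the invariant and the indices at `level - 1`
    /// and above.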
    pub proof fn align_down_inv(self, level: int)
        requires
            1 <= level < NR_LEVELS,
            self.inv(),
        ensures
            self.align_down(level).inv(),
            forall|i: int|
                level <= i < NR_LEVELS ==> #[trigger] self.index[i - 1]
                    == self.align_down(level).index[i - 1],
        decreases level,
    {
        if level == 1 {
            assert(self.align_down(1).index =~= self.index);
        } else {
            let tmp = self.align_down(level - 1);
            self.align_down_inv(level - 1);
            let new = self.align_down(level);
            assert(new.index.dom() =~= Set::new(|i: int| 0 <= i < NR_LEVELS));
            assert forall|i: int|
                #![trigger new.index.contains_key(i)]
                0 <= i < NR_LEVELS implies {
                &&& new.index.contains_key(i)
                &&& 0 <= new.index[i]
                &&& new.index[i] < NR_ENTRIES
            } by {
                if i != level - 2 {
                    assert(tmp.index.contains_key(i));
                }
            }
        }
    }

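    /// Aligning down never changes the leading bits.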
    pub proof fn align_down_leading_bits(self, level: int)
        requires
            1 <= level <= NR_LEVELS,
        ensures
            self.align_down(level).leading_bits == self.leading_bits,
        decreases level,
    {
        if level > 1 {
            self.align_down_leading_bits(level - 1);
        }
    }

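    /// The full shape of an aligned address: the offset and all indices below
    /// `level - 1` are zero, while all indices from `level - 1` up are preserved.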
    pub proof fn align_down_shape(self, level: int)
        requires
            1 <= level <= NR_LEVELS,
            self.inv(),
        ensures
            self.align_down(level).inv(),
            self.align_down(level).offset == 0,
            forall|i: int| 0 <= i < level - 1 ==> #[trigger] self.align_down(level).index[i] == 0,
            forall|i: int|
                level - 1 <= i < NR_LEVELS ==> #[trigger] self.align_down(level).index[i]
                    == self.index[i],
        decreases level,
    {
        if level == 1 {
            assert(self.align_down(1).index =~= self.index);
        } else {
            let tmp = self.align_down(level - 1);
            self.align_down_shape(level - 1);
            let new = self.align_down(level);
            assert(new.index.dom() =~= Set::new(|i: int| 0 <= i < NR_LEVELS));
            assert forall|i: int|
                #![trigger new.index.contains_key(i)]
                0 <= i < NR_LEVELS implies {
                &&& new.index.contains_key(i)
                &&& 0 <= new.index[i]
                &&& new.index[i] < NR_ENTRIES
            } by {
                if i != level - 2 {
                    assert(tmp.index.contains_key(i));
                }
            }
        }
    }

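    /// A run of zero indices contributes nothing to the index sum.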
    pub proof fn to_vaddr_indices_drop_zero_range(self, from: int, to: int)
        requires
            self.inv(),
            0 <= from <= to <= NR_LEVELS,
            forall|i: int| from <= i < to ==> self.index[i] == 0,
        ensures
            self.to_vaddr_indices(from) == self.to_vaddr_indices(to),
        decreases to - from,
    {
        if from < to {
            self.to_vaddr_indices_drop_zero_range(from + 1, to);
        }
    }

    pub proof fn to_vaddr_indices_eq_if_indices_eq(self, other: Self, start: int)
        requires
            self.inv(),
            other.inv(),
            0 <= start <= NR_LEVELS,
            forall|i: int| start <= i < NR_LEVELS ==> self.index[i] == other.index[i],
        ensures
            self.to_vaddr_indices(start) == other.to_vaddr_indices(start),
        decreases NR_LEVELS - start,
    {
        if start < NR_LEVELS {
            self.to_vaddr_indices_eq_if_indices_eq(other, start + 1);
        }
    }

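    /// Two addresses that agree on all indices from `level - 1` up, and on
    /// their leading bits, align down to the same concrete address.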
    pub proof fn align_down_to_vaddr_eq_if_upper_indices_eq(self, other: Self, level: int)
        requires
            1 <= level <= NR_LEVELS,
            self.inv(),
            other.inv(),
            forall|i: int| level - 1 <= i < NR_LEVELS ==> self.index[i] == other.index[i],
            self.leading_bits == other.leading_bits,
        ensures
            self.align_down(level).to_vaddr() == other.align_down(level).to_vaddr(),
        decreases level,
    {
        let lhs = self.align_down(level);
        let rhs = other.align_down(level);

        self.align_down_shape(level);
        other.align_down_shape(level);
        self.align_down_leading_bits(level);
        other.align_down_leading_bits(level);

        lhs.to_vaddr_indices_drop_zero_range(0, level - 1);
        rhs.to_vaddr_indices_drop_zero_range(0, level - 1);
        lhs.to_vaddr_indices_eq_if_indices_eq(rhs, level - 1);
        assert(lhs.leading_bits == rhs.leading_bits);
    }

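    /// Trusted correspondence between abstract alignment and concrete
    /// `nat_align_down` on the page size of `level`.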
    pub axiom fn align_down_concrete(self, level: int)
        requires
            1 <= level <= NR_LEVELS,
        ensures
            self.align_down(level).reflect(
                nat_align_down(
                    self.to_vaddr() as nat,
                    page_size(level as PagingLevel) as nat,
                ) as Vaddr,
            ),
    ;

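    /// Trusted extension of `align_down` to level 0: it preserves the
    /// invariant and leaves every index unchanged.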
    pub axiom fn align_down_zero_properties(self)
        requires
            self.inv(),
        ensures
            self.align_down(0).inv(),
            forall|i: int|
                #![auto]
                0 <= i < NR_LEVELS ==> self.align_down(0).index[i] == self.index[i],
    ;

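    /// Trusted fact: two addresses inside the same aligned node of size
    /// `page_size(level + 1)` agree on all indices at `level` and above.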
    pub axiom fn same_node_indices_match(
        va1: Vaddr,
        va2: Vaddr,
        node_start: Vaddr,
        level: PagingLevel,
    )
        requires
            1 <= level,
            level < NR_LEVELS,
            node_start <= va1,
            va1 < node_start + page_size((level + 1) as PagingLevel),
            node_start <= va2,
            va2 < node_start + page_size((level + 1) as PagingLevel),
            node_start as nat % page_size((level + 1) as PagingLevel) as nat == 0,
        ensures
            forall|i: int|
                #![auto]
                level as int <= i < NR_LEVELS ==> Self::from_vaddr(va1).index[i]
                    == Self::from_vaddr(va2).index[i],
    ;

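    /// Aligns up by aligning down to `level` and then advancing one index at
    /// that level; its correspondence with the concrete `nat_align_up` is
    /// trusted via the two axioms below.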
    pub open spec fn align_up(self, level: int) -> Self {
        let lower_aligned = self.align_down(level);
        lower_aligned.next_index(level)
    }

    pub axiom fn align_up_concrete(self, level: int)
        requires
            1 <= level <= NR_LEVELS,
        ensures
            self.align_up(level).reflect(
                nat_align_up(
                    self.to_vaddr() as nat,
                    page_size(level as PagingLevel) as nat,
                ) as Vaddr,
            ),
    ;

    pub axiom fn align_diff(self, level: int)
        requires
            1 <= level <= NR_LEVELS,
        ensures
            nat_align_up(self.to_vaddr() as nat, page_size(level as PagingLevel) as nat)
                == nat_align_down(self.to_vaddr() as nat, page_size(level as PagingLevel) as nat)
                + page_size(level as PagingLevel),
            nat_align_up(self.to_vaddr() as nat, page_size(level as PagingLevel) as nat)
                < usize::MAX,
    ;

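    /// When the index at `level - 1` is saturated, aligning up at `level`
    /// carries into the next level, so it coincides with aligning up at
    /// `level + 1`.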
    pub proof fn align_up_carry(self, level: int)
        requires
            self.inv(),
            1 <= level,
            level < NR_LEVELS,
            self.index[level - 1] == NR_ENTRIES - 1,
        ensures
            self.align_up(level) == self.align_up(level + 1),
        decreases NR_LEVELS - level,
    {
        self.align_down_shape(level);
        self.align_down_shape(level + 1);
        assert(self.align_down(level).index.insert(level - 1, 0)
            =~= self.align_down(level + 1).index);
    }

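    /// Increments the index at `level - 1`, rippling the carry upward when an
    /// index saturates at `NR_ENTRIES - 1`; a carry out of the top level
    /// increments `leading_bits` instead. For example, with indices
    /// `{0: 511, 1: 5, ...}`, `next_index(1)` yields `{0: 0, 1: 6, ...}`.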
    pub open spec fn next_index(self, level: int) -> Self
        decreases NR_LEVELS - level,
        when 1 <= level <= NR_LEVELS
    {
        let index = self.index[level - 1];
        let next_index = index + 1;
        if next_index == NR_ENTRIES && level < NR_LEVELS {
            let next_va = Self { index: self.index.insert(level - 1, 0), ..self };
            next_va.next_index(level + 1)
        } else if next_index == NR_ENTRIES && level == NR_LEVELS {
            Self {
                index: self.index.insert(level - 1, 0),
                leading_bits: self.leading_bits + 1,
                ..self
            }
        } else {
            Self { index: self.index.insert(level - 1, next_index), ..self }
        }
    }

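    /// Characterizes the carry chain of `next_index(start_level)` from
    /// `level` upward: an index that ended at zero must have been saturated
    /// before the increment (and the wrap continues above), while a nonzero
    /// result means the pre-increment index was strictly below `NR_ENTRIES - 1`.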
    pub open spec fn wrapped(self, start_level: int, level: int) -> bool
        decreases NR_LEVELS - level,
        when 1 <= start_level <= level <= NR_LEVELS
    {
        &&& self.next_index(start_level).index[level - 1] == 0 ==> {
            &&& self.index[level - 1] + 1 == NR_ENTRIES
            &&& if level < NR_LEVELS {
                self.wrapped(start_level, level + 1)
            } else {
                true
            }
        }
        &&& self.next_index(start_level).index[level - 1] != 0 ==> self.index[level - 1] + 1
            < NR_ENTRIES
    }

    pub proof fn use_wrapped(self, start_level: int, level: int)
        requires
            1 <= start_level <= level < NR_LEVELS,
            self.wrapped(start_level, level),
            self.next_index(start_level).index[level - 1] == 0,
        ensures
            self.index[level - 1] + 1 == NR_ENTRIES,
    {
    }

    pub proof fn wrapped_unwrap(self, start_level: int, level: int)
        requires
            1 <= start_level <= level < NR_LEVELS,
            self.wrapped(start_level, level),
            self.next_index(start_level).index[level - 1] == 0,
        ensures
            self.wrapped(start_level, level + 1),
    {
    }

    pub proof fn wrapped_after_carry_equiv(self, start_level: int, level: int)
        requires
            self.inv(),
            1 <= start_level < level <= NR_LEVELS,
            self.index[start_level - 1] + 1 == NR_ENTRIES,
        ensures
            ({
                let next_va = Self {
                    index: self.index.insert(start_level - 1, 0),
                    ..self
                };
                self.wrapped(start_level, level) == next_va.wrapped(start_level + 1, level)
            }),
        decreases NR_LEVELS - level,
    {
        let next_va = Self { index: self.index.insert(start_level - 1, 0), ..self };
        assert forall|i: int| start_level <= i < NR_LEVELS implies next_va.index[i]
            == self.index[i] by {};
        if level < NR_LEVELS {
            self.wrapped_after_carry_equiv(start_level, level + 1);
        }
    }

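    /// Contrapositive of the wrap condition: if the index at `level - 1` was
    /// not saturated, incrementing cannot leave a zero there.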
    pub proof fn wrapped_index_nonzero(self, start_level: int, level: int)
        requires
            1 <= start_level <= level <= NR_LEVELS,
            self.wrapped(start_level, level),
            self.index[level - 1] + 1 < NR_ENTRIES,
        ensures
            self.next_index(start_level).index[level - 1] != 0,
    {
        if self.next_index(start_level).index[level - 1] == 0 {
            if level < NR_LEVELS { self.use_wrapped(start_level, level); }
        }
    }

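    /// Specialization of `wrapped_index_nonzero` for a zero owner index:
    /// incrementing can never wrap a zero index back to zero.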
    pub proof fn wrapped_nonzero_at_level(
        abs_va_down: Self, abs_next_va: Self,
        start_level: int, level: int, owner_index_at_level: int,
    )
        requires
            1 <= start_level <= level <= NR_LEVELS,
            abs_va_down.wrapped(start_level, level),
            abs_va_down.next_index(start_level) == abs_next_va,
            abs_va_down.index[level - 1] == owner_index_at_level,
            owner_index_at_level == 0,
        ensures
            abs_next_va.index[level - 1] != 0,
    {
        abs_va_down.wrapped_index_nonzero(start_level, level);
    }

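    /// Incrementing at `start_level` never disturbs indices strictly below it.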
    pub proof fn next_index_preserves_lower_indices(self, start_level: int, lower_level: int)
        requires
            self.inv(),
            1 <= lower_level < start_level <= NR_LEVELS,
        ensures
            self.next_index(start_level).index[lower_level - 1] == self.index[lower_level - 1],
        decreases NR_LEVELS - start_level,
    {
        let index = self.index[start_level - 1];
        let next_index = index + 1;
        if next_index == NR_ENTRIES && start_level < NR_LEVELS {
            let next_va = Self { index: self.index.insert(start_level - 1, 0), ..self };
            next_va.next_index_preserves_lower_indices(start_level + 1, lower_level);
        } else if next_index == NR_ENTRIES && start_level == NR_LEVELS {
        }
    }

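    /// Every well-formed address satisfies its own wrap condition, starting
    /// at the level being incremented.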
    pub proof fn next_index_wrap_condition(self, level: int)
        requires
            self.inv(),
            1 <= level <= NR_LEVELS,
        ensures
            self.wrapped(level, level),
        decreases NR_LEVELS - level,
    {
        let index = self.index[level - 1];
        let next_index = index + 1;
        if next_index == NR_ENTRIES {
            if level < NR_LEVELS {
                let next_va = Self { index: self.index.insert(level - 1, 0), ..self };
                next_va.next_index_wrap_condition(level + 1);
                self.wrapped_after_carry_equiv(level, level + 1);
                next_va.next_index_preserves_lower_indices(level + 1, level);
            }
        } else {
            assert(self.index.contains_key(level - 1));
        }
    }

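    /// The concrete address within the translated range: the weighted sum of
    /// all indices plus the offset, ignoring `leading_bits`. Related to
    /// `to_vaddr` by the trusted `to_vaddr_is_compute_vaddr` below.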
    pub open spec fn compute_vaddr(self) -> Vaddr {
        self.rec_compute_vaddr(0)
    }

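    /// Recursive worker for `compute_vaddr`: sums `index[i] * page_size(i + 1)`
    /// for levels `i` and above, bottoming out at the page offset.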
    pub open spec fn rec_compute_vaddr(self, i: int) -> Vaddr
        decreases NR_LEVELS - i,
        when 0 <= i <= NR_LEVELS
    {
        if i >= NR_LEVELS {
            self.offset as Vaddr
        } else {
            let shift = page_size((i + 1) as PagingLevel);
            (self.index[i] * shift + self.rec_compute_vaddr(i + 1)) as Vaddr
        }
    }

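    /// The tree path to the node covering this address at `level`: indices
    /// listed from the root (level `NR_LEVELS - 1`) down to `level`.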
    pub open spec fn to_path(self, level: int) -> TreePath<NR_ENTRIES>
        recommends
            0 <= level < NR_LEVELS,
    {
        TreePath(self.rec_to_path(NR_LEVELS - 1, level))
    }

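    /// Recursive worker for `to_path`: emits `index[abstract_level]` down to
    /// `index[bottom_level]`, highest level first.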
    pub open spec fn rec_to_path(self, abstract_level: int, bottom_level: int) -> Seq<usize>
        decreases abstract_level - bottom_level,
        when bottom_level <= abstract_level
    {
        if abstract_level < bottom_level {
            seq![]
        } else if abstract_level == bottom_level {
            seq![self.index[abstract_level] as usize]
        } else {
            seq![self.index[abstract_level] as usize].add(
                self.rec_to_path(abstract_level - 1, bottom_level),
            )
        }
    }

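    /// The base address of a path equals the aligned `compute_vaddr`; the
    /// proof does a case split on `level` and unfolds both sums explicitly.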
    #[verifier::rlimit(400)]
    pub proof fn to_path_vaddr(self, level: int)
        requires
            self.inv(),
            0 <= level < NR_LEVELS,
        ensures
            vaddr(self.to_path(level)) == self.align_down(level + 1).compute_vaddr(),
    {
        self.to_path_inv(level);
        self.to_path_len(level);
        lemma_page_size_spec_level1();
        vstd::arithmetic::power2::lemma2_to64();
        vstd::arithmetic::power2::lemma2_to64_rest();
        crate::specs::arch::paging_consts::lemma_nr_subpage_per_huge_eq_nr_entries();
        vstd_extra::external::ilog2::lemma_usize_ilog2_to32();
        let path = self.to_path(level);
        if level == 3 {
            let aligned = self.align_down(4);
            self.align_down_shape(4);
            self.to_path_index(3, 0);
            path.index_satisfies_elem_inv(0);
            assert(vaddr(path) == path.index(0) * 0x80_0000_0000usize) by {
                assert(rec_vaddr(path, 0) == (vaddr_make::<NR_LEVELS>(0, path.index(0))
                    + rec_vaddr(path, 1)) as usize);
            };
            assert(aligned.rec_compute_vaddr(3) == self.index[3] * 0x80_0000_0000usize) by {
                assert(aligned.rec_compute_vaddr(3) == (aligned.index[3] * page_size(4)
                    + aligned.rec_compute_vaddr(4)) as Vaddr);
            };
            assert(aligned.rec_compute_vaddr(1) == self.index[3] * 0x80_0000_0000usize) by {
                assert(aligned.rec_compute_vaddr(1) == (aligned.index[1] * page_size(2)
                    + aligned.rec_compute_vaddr(2)) as Vaddr);
            };
            assert(aligned.compute_vaddr() == aligned.compute_vaddr());
            assert(aligned.compute_vaddr() == (aligned.index[0] * page_size(1)
                + aligned.rec_compute_vaddr(1)) as Vaddr);
            assert(vaddr(path) == aligned.compute_vaddr());
        } else if level == 2 {
            let aligned = self.align_down(3);
            self.align_down_shape(3);
            self.to_path_index(2, 0);
            self.to_path_index(2, 1);
            path.index_satisfies_elem_inv(0);
            path.index_satisfies_elem_inv(1);
            assert(vaddr(path) == path.index(0) * 0x80_0000_0000usize + path.index(1)
                * 0x4000_0000usize) by {
                assert(vaddr(path) == rec_vaddr(path, 0));
                assert(rec_vaddr(path, 1) == (vaddr_make::<NR_LEVELS>(1, path.index(1))
                    + rec_vaddr(path, 2)) as usize);
            };
            assert(aligned.rec_compute_vaddr(3) == self.index[3] * 0x80_0000_0000usize) by {
                assert(aligned.rec_compute_vaddr(3) == (aligned.index[3] * page_size(4)
                    + aligned.rec_compute_vaddr(4)) as Vaddr);
            };
            assert(aligned.rec_compute_vaddr(1) == self.index[2] * 0x4000_0000usize + self.index[3]
                * 0x80_0000_0000usize) by {
                assert(aligned.rec_compute_vaddr(1) == (aligned.index[1] * page_size(2)
                    + aligned.rec_compute_vaddr(2)) as Vaddr);
            };
            assert(vaddr(path) == aligned.compute_vaddr());
        } else if level == 1 {
            let aligned = self.align_down(2);
            self.align_down_shape(2);
            self.to_path_index(1, 0);
            self.to_path_index(1, 1);
            self.to_path_index(1, 2);
            path.index_satisfies_elem_inv(0);
            path.index_satisfies_elem_inv(1);
            path.index_satisfies_elem_inv(2);
            assert(vaddr(path) == path.index(0) * 0x80_0000_0000usize + path.index(1)
                * 0x4000_0000usize + path.index(2) * 0x20_0000usize) by {
                assert(vaddr(path) == rec_vaddr(path, 0));
                assert(rec_vaddr(path, 3) == 0);
                assert(rec_vaddr(path, 2) == (vaddr_make::<NR_LEVELS>(2, path.index(2))
                    + rec_vaddr(path, 3)) as usize);
                assert(rec_vaddr(path, 1) == (vaddr_make::<NR_LEVELS>(1, path.index(1))
                    + rec_vaddr(path, 2)) as usize);
                assert(rec_vaddr(path, 0) == (vaddr_make::<NR_LEVELS>(0, path.index(0))
                    + rec_vaddr(path, 1)) as usize);
                assert(vaddr_make::<NR_LEVELS>(0, path.index(0)) == 0x80_0000_0000usize
                    * path.index(0)) by (compute);
                assert(vaddr_make::<NR_LEVELS>(1, path.index(1)) == 0x4000_0000usize
                    * path.index(1)) by (compute);
                assert(vaddr_make::<NR_LEVELS>(2, path.index(2)) == 0x20_0000usize
                    * path.index(2)) by (compute);
            };
            assert(aligned.rec_compute_vaddr(3) == self.index[3] * 0x80_0000_0000usize) by {
                assert(aligned.rec_compute_vaddr(3) == (aligned.index[3] * page_size(4)
                    + aligned.rec_compute_vaddr(4)) as Vaddr);
            };
            assert(aligned.rec_compute_vaddr(1) == self.index[1] * 0x20_0000usize + self.index[2]
                * 0x4000_0000usize + self.index[3] * 0x80_0000_0000usize) by {
                assert(aligned.rec_compute_vaddr(1) == (aligned.index[1] * page_size(2)
                    + aligned.rec_compute_vaddr(2)) as Vaddr);
            };
            assert(aligned.compute_vaddr() == aligned.compute_vaddr());
            assert(aligned.compute_vaddr() == (aligned.index[0] * page_size(1)
                + aligned.rec_compute_vaddr(1)) as Vaddr);
            assert(vaddr(path) == aligned.compute_vaddr());
        } else {
            let aligned = self.align_down(1);
            self.align_down_shape(1);
            self.to_path_index(0, 0);
            self.to_path_index(0, 1);
            self.to_path_index(0, 2);
            self.to_path_index(0, 3);
            path.index_satisfies_elem_inv(0);
            path.index_satisfies_elem_inv(1);
            path.index_satisfies_elem_inv(2);
            path.index_satisfies_elem_inv(3);
            assert(vaddr(path) == path.index(0) * 0x80_0000_0000usize + path.index(1)
                * 0x4000_0000usize + path.index(2) * 0x20_0000usize + path.index(3) * 0x1000usize)
            by {
                assert(vaddr(path) == rec_vaddr(path, 0));
                assert(rec_vaddr(path, 4) == 0);
                assert(rec_vaddr(path, 2) == (vaddr_make::<NR_LEVELS>(2, path.index(2))
                    + rec_vaddr(path, 3)) as usize);
                assert(rec_vaddr(path, 1) == (vaddr_make::<NR_LEVELS>(1, path.index(1))
                    + rec_vaddr(path, 2)) as usize);
                assert(vaddr_make::<NR_LEVELS>(0, path.index(0)) == 0x80_0000_0000usize
                    * path.index(0)) by (compute);
                assert(vaddr_make::<NR_LEVELS>(1, path.index(1)) == 0x4000_0000usize
                    * path.index(1)) by (compute);
                assert(vaddr_make::<NR_LEVELS>(2, path.index(2)) == 0x20_0000usize * path.index(2))
                    by (compute);
                assert(vaddr_make::<NR_LEVELS>(3, path.index(3)) == 0x1000usize * path.index(3))
                    by (compute);
            };
            assert(aligned.rec_compute_vaddr(4) == 0);
            assert(aligned.rec_compute_vaddr(3) == self.index[3] * 0x80_0000_0000usize) by {
                assert(aligned.rec_compute_vaddr(3) == (aligned.index[3] * page_size(4)
                    + aligned.rec_compute_vaddr(4)) as Vaddr);
            };
            assert(aligned.rec_compute_vaddr(2) == self.index[2] * 0x4000_0000usize + self.index[3]
                * 0x80_0000_0000usize) by {
            };
            assert(aligned.compute_vaddr() == self.index[0] * 0x1000usize + self.index[1]
                * 0x20_0000usize + self.index[2] * 0x4000_0000usize + self.index[3]
                * 0x80_0000_0000usize) by {
                assert(aligned.compute_vaddr() == aligned.compute_vaddr());
                assert(aligned.compute_vaddr() == (aligned.index[0] * page_size(1)
                    + aligned.rec_compute_vaddr(1)) as Vaddr);
            };
            assert(vaddr(path) == aligned.compute_vaddr());
        }
    }

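    /// Trusted bridge between the two concretizations: `to_vaddr` equals
    /// `compute_vaddr` plus the leading bits shifted above bit 48.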
    pub axiom fn to_vaddr_is_compute_vaddr(self)
        requires
            self.inv(),
        ensures
            self.to_vaddr() as int
                == self.compute_vaddr() as int
                + self.leading_bits * 0x1_0000_0000_0000int,
    ;

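    /// The partial index sum starting at `start` is nonnegative and leaves
    /// room for one more `2^(12 + 9 * start)` step below `2^48`.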
    pub proof fn to_vaddr_indices_gap_bound(self, start: int)
        requires
            self.inv(),
            0 <= start <= NR_LEVELS,
        ensures
            0 <= self.to_vaddr_indices(start),
            self.to_vaddr_indices(start) + pow2((12 + 9 * start) as nat) as int <= pow2(
                (12 + 9 * NR_LEVELS) as nat,
            ) as int,
        decreases NR_LEVELS - start,
    {
        vstd::arithmetic::power2::lemma2_to64();
        vstd::arithmetic::power2::lemma2_to64_rest();
        vstd::arithmetic::power2::lemma_pow2_pos((12 + 9 * start) as nat);
        if start == NR_LEVELS {
        } else {
            let shift = pow2((12 + 9 * start) as nat) as int;
            let next_shift = pow2((12 + 9 * (start + 1)) as nat) as int;
            let top = pow2((12 + 9 * NR_LEVELS) as nat) as int;
            self.to_vaddr_indices_gap_bound(start + 1);
            assert(self.index.contains_key(start));
            vstd::arithmetic::power2::lemma_pow2_adds((12 + 9 * start) as nat, 9nat);
            vstd::arithmetic::mul::lemma_mul_inequality(self.index[start] + 1, 0x200int, shift);
            vstd::arithmetic::mul::lemma_mul_is_distributive_add_other_way(
                shift,
                self.index[start],
                1,
            );
        }
    }

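    /// The low 48 bits of a well-formed address fit below `2^48`, and the
    /// full value fits in 64 bits.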
    pub proof fn to_vaddr_bounded(self)
        requires
            self.inv(),
        ensures
            0 <= self.offset + self.to_vaddr_indices(0) < 0x1_0000_0000_0000int,
            self.to_vaddr() as int
                == self.offset
                + self.to_vaddr_indices(0)
                + self.leading_bits * 0x1_0000_0000_0000int,
            self.offset
                + self.to_vaddr_indices(0)
                + self.leading_bits * 0x1_0000_0000_0000int
                < 0x1_0000_0000_0000_0000int,
    {
        vstd::arithmetic::power2::lemma2_to64();
        vstd::arithmetic::power2::lemma2_to64_rest();
        self.to_vaddr_indices_gap_bound(0);
        assert(pow2((12 + 9 * NR_LEVELS) as nat) as int == 0x1_0000_0000_0000int)
            by (compute);
        assert(self.leading_bits * 0x1_0000_0000_0000int
            + 0x1_0000_0000_0000int
            <= 0x1_0000 * 0x1_0000_0000_0000int) by (nonlinear_arith)
            requires 0 <= self.leading_bits < 0x1_0000int;
        assert(0x1_0000 * 0x1_0000_0000_0000int == 0x1_0000_0000_0000_0000int)
            by (compute);
    }

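    /// A non-carrying increment at `level` moves the concrete address forward
    /// by exactly one `level`-sized page.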
    pub proof fn index_increment_adds_page_size(self, level: int)
        requires
            self.inv(),
            1 <= level <= NR_LEVELS,
            self.index[level - 1] + 1 < NR_ENTRIES,
        ensures
            (Self {
                index: self.index.insert(level - 1, self.index[level - 1] + 1),
                ..self
            }).to_vaddr() == self.to_vaddr() + page_size(level as PagingLevel),
    {
        let new_va = Self {
            index: self.index.insert(level - 1, self.index[level - 1] + 1),
            ..self
        };
        assert forall|i: int| #![trigger new_va.index.contains_key(i)] 0 <= i < NR_LEVELS implies {
            &&& new_va.index.contains_key(i)
            &&& 0 <= new_va.index[i]
            &&& new_va.index[i] < NR_ENTRIES
        } by {
            assert(self.index.contains_key(i));
        };
        assert(new_va.inv());
        self.to_vaddr_bounded();
        new_va.to_vaddr_bounded();
        assert(new_va.to_vaddr() as int - self.to_vaddr() as int
            == new_va.to_vaddr_indices(0) - self.to_vaddr_indices(0));
        vstd::arithmetic::power2::lemma2_to64();
        vstd::arithmetic::power2::lemma2_to64_rest();
        if level == 1 {
            lemma_page_size_spec_level1();
            assert forall|i: int| 1 <= i < NR_LEVELS implies new_va.index[i] == self.index[i] by {};
            new_va.to_vaddr_indices_eq_if_indices_eq(self, 1);
            assert((self.index[0] + 1) * 0x1000 == self.index[0] * 0x1000 + 0x1000)
                by (nonlinear_arith);
        } else if level == 2 {
            vstd_extra::external::ilog2::lemma_usize_ilog2_to32();
            assert forall|i: int| 2 <= i < NR_LEVELS implies new_va.index[i] == self.index[i] by {};
            new_va.to_vaddr_indices_eq_if_indices_eq(self, 2);
            assert(self.to_vaddr_indices(0) == self.index[0] * pow2(12nat) as int
                + self.to_vaddr_indices(1));
            assert((self.index[1] + 1) * 0x20_0000 == self.index[1] * 0x20_0000 + 0x20_0000)
                by (nonlinear_arith);
            assert(new_va.to_vaddr_indices(1) == self.to_vaddr_indices(1) + 0x20_0000);
        } else if level == 3 {
            vstd_extra::external::ilog2::lemma_usize_ilog2_to32();
            assert forall|i: int| 3 <= i < NR_LEVELS implies new_va.index[i] == self.index[i] by {};
            new_va.to_vaddr_indices_eq_if_indices_eq(self, 3);
            assert(new_va.to_vaddr_indices(2) == self.to_vaddr_indices(2) + 0x4000_0000);
            assert(new_va.to_vaddr_indices(1) == self.to_vaddr_indices(1) + 0x4000_0000);
        } else {
            vstd_extra::external::ilog2::lemma_usize_ilog2_to32();
            assert forall|i: int| 4 <= i < NR_LEVELS implies new_va.index[i] == self.index[i] by {};
            new_va.to_vaddr_indices_eq_if_indices_eq(self, 4);
            assert(self.to_vaddr_indices(1) == self.index[1] * pow2(21nat) as int
                + self.to_vaddr_indices(2));
            assert(self.to_vaddr_indices(2) == self.index[2] * pow2(30nat) as int
                + self.to_vaddr_indices(3));
            assert((self.index[3] + 1) * 0x80_0000_0000 == self.index[3] * 0x80_0000_0000
                + 0x80_0000_0000) by (nonlinear_arith);
            assert(new_va.to_vaddr_indices(3) == self.to_vaddr_indices(3) + 0x80_0000_0000);
            assert(new_va.to_vaddr_indices(2) == self.to_vaddr_indices(2) + 0x80_0000_0000);
            assert(new_va.to_vaddr_indices(1) == self.to_vaddr_indices(1) + 0x80_0000_0000);
        }
    }

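    /// A path taken at `level` has one entry per level from the root down.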
    pub proof fn to_path_len(self, level: int)
        requires
            0 <= level < NR_LEVELS,
        ensures
            self.to_path(level).len() == NR_LEVELS - level,
    {
        self.rec_to_path_len(NR_LEVELS - 1, level);
    }

    proof fn rec_to_path_len(self, abstract_level: int, bottom_level: int)
        requires
            bottom_level <= abstract_level,
        ensures
            self.rec_to_path(abstract_level, bottom_level).len() == abstract_level - bottom_level
                + 1,
        decreases abstract_level - bottom_level,
    {
        if abstract_level > bottom_level {
            self.rec_to_path_len(abstract_level - 1, bottom_level);
        }
    }

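    /// Every entry of a path is a valid child index, so the path satisfies
    /// the tree-path invariant.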
    pub proof fn to_path_inv(self, level: int)
        requires
            self.inv(),
            0 <= level < NR_LEVELS,
        ensures
            self.to_path(level).inv(),
    {
        self.to_path_len(level);
        assert forall|i: int| 0 <= i < self.to_path(level).len() implies TreePath::<
            NR_ENTRIES,
        >::elem_inv(#[trigger] self.to_path(level).index(i)) by {
            let j = NR_LEVELS - 1 - i;
            self.to_path_index(level, i);
            assert(self.index.contains_key(j));
        };
    }
}

impl AbstractVaddr {
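    /// `rec_vaddr` depends only on the path entries from `idx` onward.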
    proof fn rec_vaddr_eq_if_indices_eq(
        path1: TreePath<NR_ENTRIES>,
        path2: TreePath<NR_ENTRIES>,
        idx: int,
    )
        requires
            path1.inv(),
            path2.inv(),
            path1.len() == path2.len(),
            0 <= idx <= path1.len(),
            forall|i: int| idx <= i < path1.len() ==> path1.index(i) == path2.index(i),
        ensures
            rec_vaddr(path1, idx) == rec_vaddr(path2, idx),
        decreases path1.len() - idx,
    {
        if idx < path1.len() {
            path1.index_satisfies_elem_inv(idx);
            path2.index_satisfies_elem_inv(idx);
            Self::rec_vaddr_eq_if_indices_eq(path1, path2, idx + 1);
        }
    }

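    /// Any path whose entries mirror this address's indices (root first)
    /// concretizes to the base of the node it points at, i.e. the aligned
    /// `compute_vaddr` with the offset removed.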
    pub proof fn path_matches_vaddr(self, path: TreePath<NR_ENTRIES>)
        requires
            self.inv(),
            path.inv(),
            path.len() <= NR_LEVELS,
            forall|i: int| 0 <= i < path.len() ==> path.index(i) == self.index[NR_LEVELS - 1 - i],
        ensures
            vaddr(path) == self.align_down((NR_LEVELS - path.len() + 1) as int).compute_vaddr()
                - self.align_down((NR_LEVELS - path.len() + 1) as int).offset,
    {
        if path.len() == 0 {
            let aligned = self.align_down(5);
            self.align_down_shape(4);
            assert(aligned.index[3] == 0) by {
                assert(aligned == AbstractVaddr {
                    index: self.align_down(4).index.insert(3, 0),
                    ..self.align_down(4)
                });
            };
            assert(aligned.rec_compute_vaddr(4) == 0);
            assert(aligned.rec_compute_vaddr(3) == 0) by {
                assert(aligned.rec_compute_vaddr(3) == (aligned.index[3] * page_size(4)
                    + aligned.rec_compute_vaddr(4)) as Vaddr);
            };
            assert(aligned.rec_compute_vaddr(2) == 0) by {
                assert(aligned.rec_compute_vaddr(2) == (aligned.index[2] * page_size(3)
                    + aligned.rec_compute_vaddr(3)) as Vaddr);
            };
            assert(aligned.rec_compute_vaddr(1) == 0) by {
                assert(aligned.rec_compute_vaddr(1) == (aligned.index[1] * page_size(2)
                    + aligned.rec_compute_vaddr(2)) as Vaddr);
            };
        } else {
            let level = (NR_LEVELS - path.len()) as int;
            assert(0 <= level < NR_LEVELS);
            self.to_path_inv(level);
            self.to_path_len(level);
            assert forall|i: int| 0 <= i < path.len() implies #[trigger] path.index(i)
                == self.to_path(level).index(i) by {
                self.to_path_index(level, i);
            };
            Self::rec_vaddr_eq_if_indices_eq(path, self.to_path(level), 0);
            self.to_path_vaddr(level);
            self.align_down_shape(level + 1);
        }
    }

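    /// The `i`-th path entry is the index at level `NR_LEVELS - 1 - i`.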
    pub proof fn to_path_index(self, level: int, i: int)
        requires
            self.inv(),
            0 <= level < NR_LEVELS,
            0 <= i < NR_LEVELS - level,
        ensures
            self.to_path(level).index(i) == self.index[NR_LEVELS - 1 - i],
    {
        self.to_path_len(level);
        self.rec_to_path_index(NR_LEVELS - 1, level, i);
    }

    proof fn rec_to_path_index(self, abstract_level: int, bottom_level: int, i: int)
        requires
            self.inv(),
            0 <= bottom_level <= abstract_level < NR_LEVELS,
            0 <= i < abstract_level - bottom_level + 1,
        ensures
            self.rec_to_path(abstract_level, bottom_level).index(i) == self.index[abstract_level
                - i],
        decreases abstract_level - bottom_level,
    {
        assert(self.index.contains_key(abstract_level));
        if abstract_level == bottom_level {
        } else {
            let head = seq![self.index[abstract_level] as usize];
            let tail = self.rec_to_path(abstract_level - 1, bottom_level);
            let full = head.add(tail);
            if i == 0 {
            } else {
                self.rec_to_path_index(abstract_level - 1, bottom_level, i - 1);
                assert(0 <= i - 1 < tail.len()) by {
                    self.rec_to_path_len(abstract_level - 1, bottom_level);
                };
            }
        }
    }

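    /// Concrete form of `to_path_vaddr`: the path base plus the leading bits
    /// equals the address aligned down to the covering node's page size.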
    pub proof fn to_path_vaddr_concrete(self, level: int)
        requires
            self.inv(),
            0 <= level < NR_LEVELS,
        ensures
            vaddr(self.to_path(level)) as int
                + self.leading_bits * 0x1_0000_0000_0000int
                == nat_align_down(
                    self.to_vaddr() as nat,
                    page_size((level + 1) as PagingLevel) as nat,
                ) as int,
    {
        self.to_path_vaddr(level);
        let aligned = self.align_down(level + 1);
        self.align_down_shape(level + 1);
        aligned.to_vaddr_is_compute_vaddr();
        self.align_down_concrete(level + 1);
        aligned.reflect_prop(
            nat_align_down(
                self.to_vaddr() as nat,
                page_size((level + 1) as PagingLevel) as nat,
            ) as Vaddr,
        );
        self.align_down_leading_bits(level + 1);
        let nad = nat_align_down(
            self.to_vaddr() as nat,
            page_size((level + 1) as PagingLevel) as nat,
        );
        lemma_page_size_ge_page_size((level + 1) as PagingLevel);
        vstd_extra::arithmetic::lemma_nat_align_down_sound(
            self.to_vaddr() as nat,
            page_size((level + 1) as PagingLevel) as nat,
        );
        assert(nad <= self.to_vaddr() as nat);
        assert(nad <= usize::MAX);
        assert(aligned.leading_bits == self.leading_bits);
        assert(vaddr(self.to_path(level)) as int == aligned.compute_vaddr() as int);
        assert(aligned.to_vaddr() as int
            == aligned.compute_vaddr() as int
            + aligned.leading_bits * 0x1_0000_0000_0000int);
        assert(aligned.to_vaddr() == nad as Vaddr);
        assert(aligned.to_vaddr() as int == nad as int);
    }

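    /// The address lies inside the half-open range of the node its path
    /// points at: `[path base, path base + page_size(level + 1))`.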
    pub proof fn vaddr_range_from_path(self, level: int)
        requires
            self.inv(),
            0 <= level < NR_LEVELS,
        ensures
            vaddr(self.to_path(level)) as int
                + self.leading_bits * 0x1_0000_0000_0000int
                <= self.to_vaddr() as int,
            (self.to_vaddr() as int)
                < vaddr(self.to_path(level)) as int
                + self.leading_bits * 0x1_0000_0000_0000int
                + page_size((level + 1) as PagingLevel) as int,
    {
        self.to_path_vaddr_concrete(level);
        let size = page_size((level + 1) as PagingLevel);
        let cur = self.to_vaddr() as nat;
        let start = vaddr(self.to_path(level));

        assert(page_size((level + 1) as PagingLevel) >= PAGE_SIZE) by {
            lemma_page_size_ge_page_size((level + 1) as PagingLevel);
        };
        lemma_nat_align_down_sound(cur, size as nat);
    }
}

}