1use vstd::prelude::*;
4
5use vstd_extra::arithmetic::nat_align_down;
6use vstd_extra::ownership::{InvView, ModelOf, OwnerOf};
7use vstd_extra::prelude::Inv;
8
9use core::marker::PhantomData;
10use core::ops::Range;
11
12use super::{KERNEL_PAGE_TABLE, VMALLOC_VADDR_RANGE, KERNEL_BASE_VADDR, KERNEL_END_VADDR};
13use crate::{
14 mm::{
15 frame::{untyped::AnyUFrameMeta, Frame, Segment},
16 kspace::{KernelPtConfig, MappedItem},
17 largest_pages,
18 page_prop::PageProperty,
19 Paddr, Vaddr, PAGE_SIZE,
20 page_table::{
21 is_valid_range_spec, page_size,
22 Child, CursorMut, PageTable, PageTableConfig,
23 },
24 },
25};
26
27use crate::specs::arch::mm::{MAX_PADDR, PAGE_SIZE as SPEC_PAGE_SIZE, NR_LEVELS};
28use crate::mm::PagingConstsTrait;
29use crate::specs::arch::PagingConsts;
30use crate::mm::nr_subpage_per_huge;
31use crate::specs::task::InAtomicMode;
32use crate::specs::mm::page_table::cursor::{CursorOwner, CursorView};
33use crate::specs::mm::page_table::*;
34use crate::specs::mm::frame::meta_region_owners::MetaRegionOwners;
35use crate::mm::page_table::PageTableGuard;
36use crate::mm::frame::DynFrame;
37use crate::mm::kspace::AnyFrameMeta;
38use crate::specs::mm::frame::mapping::frame_to_index_spec;
39
40verus! {
43
/// Spec-level erasure of a typed `Frame<T>` into an untyped `DynFrame`:
/// same pointer, metadata type information dropped.
pub open spec fn frame_as_dynframe<T: AnyFrameMeta>(frame: Frame<T>) -> DynFrame {
    DynFrame { ptr: frame.ptr, _marker: PhantomData }
}
48
/// Exec-level conversion of a typed frame into a `DynFrame`, tied by the
/// postcondition to the spec-level [`frame_as_dynframe`].
///
/// NOTE(review): the proof is `admit()`ed — that `frame.into()` agrees with
/// `frame_as_dynframe(frame)` is assumed here, not verified.
fn frame_into_dynframe<T: AnyUFrameMeta>(frame: Frame<T>) -> (res: DynFrame)
    ensures
        res == frame_as_dynframe(frame),
{
    proof { admit(); }
    frame.into()
}
60
61pub open spec fn frame_entry_wf<T: AnyFrameMeta>(
63 frame: Frame<T>,
64 prop: PageProperty,
65 entry_owner: EntryOwner<KernelPtConfig>,
66) -> bool {
67 let frame_mss = DynFrame { ptr: frame.ptr, _marker: PhantomData };
68 let item = MappedItem::Tracked(frame_mss, prop);
69 let (pa, level, prop_from_item) = KernelPtConfig::item_into_raw_spec(item);
70 Child::Frame(pa, level, prop_from_item).wf(entry_owner)
71}
72
/// Error returned when the kernel virtual-address range allocator cannot
/// satisfy a request.
#[derive(Debug)]
pub struct RangeAllocError;
75
76pub struct RangeAllocator;
77
impl RangeAllocator {
    /// Creates an allocator over `_r`.
    ///
    /// NOTE(review): the range argument is discarded in this verification
    /// stub; presumably the real allocator records it — confirm against the
    /// unverified implementation.
    #[verifier::external_body]
    pub const fn new(_r: core::ops::Range<Vaddr>) -> Self {
        Self
    }

    /// Allocates a virtual-address range of at least `size` bytes.
    ///
    /// The result is pinned to the uninterpreted `kvirt_alloc_spec`, whose
    /// properties are supplied by the `kvirt_alloc_*` axioms below.
    ///
    /// NOTE(review): body is `unimplemented!()` — a verification stub, not a
    /// runnable allocator.
    #[verifier::external_body]
    pub fn alloc(&self, size: usize) -> (r: Result<core::ops::Range<Vaddr>, RangeAllocError>)
        ensures
            r == kvirt_alloc_spec(size),
    {
        unimplemented!()
    }
}
92
/// Obtains a witness that the current context is in atomic (non-preemptible)
/// mode.
///
/// NOTE(review): stub (`unimplemented!()`) with no `ensures`; callers obtain
/// an `&'a G` purely by type, so no verified guarantee is attached to the
/// returned guard.
#[verifier::external_body]
pub fn disable_preempt<'a, G: InAtomicMode + 'a>() -> &'a G {
    unimplemented!()
}
97
98exec static KVIRT_AREA_ALLOCATOR: RangeAllocator = RangeAllocator::new(VMALLOC_VADDR_RANGE);
99
/// Sum of `page_size(level)` over the `(paddr, level)` pairs in
/// `elems[from..to)`. Zero for an empty (or inverted) range.
pub open spec fn sum_page_sizes_spec(elems: Seq<(Paddr, u8)>, from: int, to: int) -> nat
    decreases (to - from) when from <= to
{
    if from >= to {
        0nat
    } else {
        page_size(elems[from].1) as nat
        + sum_page_sizes_spec(elems, from + 1, to)
    }
}
111
/// Peeling lemma: extending the summation range by one element on the right
/// adds exactly that element's page size.
proof fn sum_page_sizes_extend_right(elems: Seq<(Paddr, u8)>, from: int, to: int)
    requires
        0 <= from <= to,
        to < elems.len() as int,
    ensures
        sum_page_sizes_spec(elems, from, to + 1)
            == sum_page_sizes_spec(elems, from, to)
               + page_size(elems[to].1) as nat,
    decreases to - from,
{
    if from < to {
        // Inductive case: peel the left element off both sums and recurse.
        sum_page_sizes_extend_right(elems, from + 1, to);
        assert(sum_page_sizes_spec(elems, from, to) ==
            page_size(elems[from].1) as nat
            + sum_page_sizes_spec(elems, from + 1, to));
        assert(sum_page_sizes_spec(elems, from, to + 1) ==
            page_size(elems[from].1) as nat
            + sum_page_sizes_spec(elems, from + 1, to + 1));
    } else {
        // Base case (from == to): the left sum is empty and the extended sum
        // is the single element at `from`.
        assert(sum_page_sizes_spec(elems, from, to) == 0nat);
        assert(sum_page_sizes_spec(elems, from, to + 1) ==
            page_size(elems[from].1) as nat
            + sum_page_sizes_spec(elems, from + 1, to + 1));
        assert(sum_page_sizes_spec(elems, from + 1, to + 1) == 0nat);
    }
}
141
/// Monotonicity: the partial sum over `[from, to1)` is bounded by the sum
/// over any longer range `[from, to2)` with `to1 <= to2`.
proof fn sum_page_sizes_mono(elems: Seq<(Paddr, u8)>, from: int, to1: int, to2: int)
    requires
        0 <= from <= to1 <= to2,
        to2 <= elems.len() as int,
    ensures
        sum_page_sizes_spec(elems, from, to1) <= sum_page_sizes_spec(elems, from, to2),
    decreases to2 - to1,
{
    if to1 < to2 {
        // Shrink the right end one step at a time, using the peeling lemma
        // to show each step only removes a non-negative page size.
        sum_page_sizes_extend_right(elems, from, to2 - 1);
        sum_page_sizes_mono(elems, from, to1, to2 - 1);
    }
}
156
/// Decomposes the physical range `[pa, pa + len)`, mapped at virtual address
/// `va`, into `(paddr, level)` pairs using the largest pages allowed by
/// `KernelPtConfig` (via `largest_pages`).
///
/// Postconditions (for each entry `i`):
///   - its physical address is page-aligned and equals `pa` plus the sizes of
///     all preceding entries;
///   - its level lies in `[1, NR_LEVELS]` and at most the config's highest
///     translation level;
///   - the virtual address where entry `i` lands is aligned to that entry's
///     own page size;
///   - the entry sizes sum to exactly `len`.
///
/// NOTE(review): external_body — these properties of `largest_pages` are
/// trusted here, not proved.
#[verifier::external_body]
fn collect_largest_pages(
    va: Vaddr,
    pa: Paddr,
    len: usize,
) -> (res: alloc::vec::Vec<(Paddr, u8)>)
    ensures
        forall |i: int| 0 <= i < res@.len() ==> (#[trigger] res@[i]).0 % PAGE_SIZE == 0,
        forall |i: int| 0 <= i < res@.len() ==> 1 <= (#[trigger] res@[i]).1 <= NR_LEVELS,
        forall |i: int| 0 <= i < res@.len() ==>
            (#[trigger] res@[i]).1 <= KernelPtConfig::HIGHEST_TRANSLATION_LEVEL(),
        sum_page_sizes_spec(res@, 0, res@.len() as int) == len as nat,
        forall |i: int| 0 <= i < res@.len() ==>
            (va as nat + #[trigger] sum_page_sizes_spec(res@, 0, i))
                % page_size(res@[i].1) as nat == 0,
        forall |i: int| 0 <= i < res@.len() ==>
            (#[trigger] res@[i]).0 as nat == pa as nat + sum_page_sizes_spec(res@, 0, i),
{
    largest_pages::<KernelPtConfig>(va, pa, len).collect()
}
186
187#[verifier::external_body]
188pub(crate) fn get_kernel_page_table() -> (r: &'static PageTable<KernelPtConfig>)
189 ensures
190 true,
191{
192 KERNEL_PAGE_TABLE.get().unwrap()
193}
194
195pub uninterp spec fn kvirt_alloc_spec(size: usize) -> Result<core::ops::Range<Vaddr>, RangeAllocError>;
197
/// Axiom: allocation succeeds for any positive size up to `usize::MAX / 2`.
///
/// NOTE(review): trusted assumption — a real allocator can exhaust the
/// vmalloc window, so soundness of clients rests on this axiom.
pub axiom fn kvirt_alloc_succeeds(area_size: usize)
    requires
        0 < area_size <= usize::MAX / 2,
    ensures
        kvirt_alloc_spec(area_size).is_ok();
</axiom_body>
203
204pub axiom fn kvirt_alloc_range_bounds(area_size: usize, map_offset: usize, r: core::ops::Range<Vaddr>)
205 ensures
206 kvirt_alloc_spec(area_size) == Ok::<core::ops::Range<Vaddr>, RangeAllocError>(r)
207 ==> r.start <= r.end
208 && (r.end - r.start) >= area_size
209 && map_offset <= r.end - r.start
210 && r.start + map_offset <= usize::MAX
211 && r.start % PAGE_SIZE == 0
212 && r.end % PAGE_SIZE == 0
213 && KERNEL_BASE_VADDR <= r.start
214 && r.end <= KERNEL_END_VADDR
215;
216
/// Axiom: any non-empty, page-aligned range inside the kernel VA window is a
/// valid range for the kernel page-table configuration.
///
/// NOTE(review): trusted bridge to `is_valid_range_spec`; confirm it matches
/// the actual definition of validity for `KernelPtConfig`.
pub axiom fn axiom_kernel_range_valid(r: core::ops::Range<Vaddr>)
    ensures
        (KERNEL_BASE_VADDR <= r.start
            && r.end <= KERNEL_END_VADDR
            && r.start < r.end
            && r.start % PAGE_SIZE == 0
            && r.end % PAGE_SIZE == 0)
        ==> is_valid_range_spec::<KernelPtConfig>(&r);
227
/// A contiguous kernel virtual-address area obtained from the kvirt
/// allocator, into which frames may be mapped.
#[derive(Debug)]
pub struct KVirtArea {
    /// Virtual address range covered by this area (start inclusive, end
    /// exclusive).
    pub range: Range<Vaddr>,
}
245
/// Ghost ownership of the kernel page-table state backing a `KVirtArea`.
pub tracked struct KVirtAreaOwner {
    // Ownership of the (kernel-configured) page table used to interpret
    // this area's mappings.
    pt_owner: PageTableOwner<KernelPtConfig>,
}
249
impl KVirtAreaOwner {
    /// Cursor-level view of the owned page table positioned at `addr`
    /// rounded down to a page boundary, exposing the current mappings.
    pub closed spec fn cursor_view_at(self, addr: Vaddr) -> CursorView<KernelPtConfig> {
        CursorView {
            cur_va: nat_align_down(addr as nat, SPEC_PAGE_SIZE as nat) as Vaddr,
            mappings: self.pt_owner.view_rec(self.pt_owner.0.value.path),
            phantom: PhantomData,
        }
    }
}
261
impl Inv for KVirtArea {
    /// Invariant: the area lies entirely inside the kernel VA window.
    open spec fn inv(self) -> bool {
        &&& KERNEL_BASE_VADDR <= self.range.start
        &&& self.range.end <= KERNEL_END_VADDR
    }
}
268
#[verus_verify]
impl KVirtArea {

    /// Start (inclusive) virtual address of the area.
    pub fn start(&self) -> Vaddr {
        self.range.start
    }

    /// End (exclusive) virtual address of the area.
    pub fn end(&self) -> Vaddr {
        self.range.end
    }

    /// The covered virtual-address range, returned as a fresh `Range` value.
    pub fn range(&self) -> Range<Vaddr> {
        self.range.start..self.range.end
    }

    /// Length of the area in bytes (test builds only).
    #[cfg(ktest)]
    pub fn len(&self) -> usize {
        self.range.len()
    }

    /// Spec: a mapping is present at `addr` in the owner's cursor view.
    pub open spec fn query_some_condition(self, owner: KVirtAreaOwner, addr: Vaddr) -> bool {
        owner.cursor_view_at(addr).present()
    }

    /// Spec: `query` returned `Some` item that the cursor view can account
    /// for via `query_item_spec`.
    pub open spec fn query_some_ensures(
        self,
        owner: KVirtAreaOwner,
        addr: Vaddr,
        r: Option<super::MappedItem>,
    ) -> bool {
        r is Some && owner.cursor_view_at(addr).query_item_spec(r.unwrap()) is Some
    }

    /// Spec: `query` returned `None`.
    pub open spec fn query_none_ensures(r: Option<super::MappedItem>) -> bool {
        r is None
    }

    /// Queries the kernel page table for the item mapped at `addr`.
    ///
    /// Walks a read-only cursor over the single page containing `addr` and
    /// returns the mapped item, if any. `addr` must lie within this area.
    ///
    /// NOTE(review): external_body — the ensures connecting the runtime
    /// cursor to `owner.cursor_view_at` are trusted, not proved.
    #[verifier::external_body]
    #[verus_spec(r =>
        with Tracked(owner): Tracked<KVirtAreaOwner>,
        requires
            self.inv(),
            self.range.start <= addr && addr < self.range.end,
        ensures
            self.query_some_condition(owner, addr) ==> self.query_some_ensures(owner, addr, r),
            !self.query_some_condition(owner, addr) ==> Self::query_none_ensures(r),
    )]
    pub fn query<A: InAtomicMode>(&self, addr: Vaddr) -> Option<super::MappedItem>
    {
        use align_ext::AlignExt;

        // Query exactly one page: [align_down(addr), align_down(addr) + PAGE_SIZE).
        let start = addr.align_down(PAGE_SIZE);
        let vaddr = start..start + PAGE_SIZE;
        let page_table = get_kernel_page_table();

        let preempt_guard = disable_preempt::<A>();
        let (mut cursor, _cursor_owner) = page_table.cursor(preempt_guard, &vaddr).unwrap();
        cursor.query().unwrap().1
    }

    /// Allocates a fresh `area_size`-byte kernel virtual area and maps the
    /// given base-level `frames` consecutively starting at `map_offset`
    /// inside it, all with page property `prop`.
    ///
    /// Ghost parameters: one `EntryOwner` per frame is consumed from the
    /// front of `entry_owners`; `regions`, `guards`, and `guard_perm` thread
    /// the metadata-region and page-table-guard ownership through the
    /// mutable cursor.
    #[verus_spec(
        with Tracked(owner): Tracked<KVirtAreaOwner>,
        Tracked(entry_owners): Tracked<&mut Seq<EntryOwner<KernelPtConfig>>>,
        Tracked(regions): Tracked<&mut MetaRegionOwners>,
        Tracked(guards): Tracked<&mut Guards<'a, KernelPtConfig>>,
        Tracked(guard_perm): Tracked<GuardPerm<'a, KernelPtConfig>>
    )]
    pub fn map_frames<'a, T: AnyUFrameMeta, A: InAtomicMode + 'a>(
        area_size: usize,
        map_offset: usize,
        frames: alloc::vec::Vec<Frame<T>>,
        prop: PageProperty,
    ) -> Self
        requires
            // Sizing: offset and all frames fit within the requested area.
            0 < area_size,
            map_offset < area_size,
            map_offset % PAGE_SIZE == 0,
            area_size <= usize::MAX / 2,
            old(entry_owners).len() == frames.len(),
            frames.len() as int * PAGE_SIZE as int + map_offset as int <= area_size as int,
            // Each frame comes with a valid, matching entry owner.
            forall |i: int| 0 <= i < old(entry_owners).len() ==> (#[trigger] old(entry_owners)[i]).inv(),
            forall |i: int| 0 <= i < frames.len() ==>
                frame_entry_wf(frames[i], prop, #[trigger] old(entry_owners)[i]),
            // No frame is already mapped, and no two frames alias the same
            // physical address.
            forall |i: int| 0 <= i < frames.len() ==>
                CursorMut::<'a, KernelPtConfig, A>::item_not_mapped(
                    MappedItem::Tracked(frame_as_dynframe(#[trigger] frames[i]), prop),
                    *old(regions),
                ),
            forall |i: int, j: int| 0 <= i < j < frames.len() ==>
                (#[trigger] old(entry_owners)[i]).frame.unwrap().mapped_pa != (#[trigger] old(entry_owners)[j]).frame.unwrap().mapped_pa,
    {
        proof {
            // Allocation cannot fail for 0 < area_size <= usize::MAX / 2.
            kvirt_alloc_succeeds(area_size);
        }

        let range = KVIRT_AREA_ALLOCATOR.alloc(area_size).unwrap();

        proof {
            // Establish alignment/containment facts about the fresh range.
            kvirt_alloc_range_bounds(area_size, map_offset, range);
        }

        let cursor_range = range.start + map_offset..range.end;

        proof {
            // The sub-range stays inside the kernel window and page-aligned,
            // hence valid for a kernel page-table cursor.
            axiom_kernel_range_valid(cursor_range);
        }

        let page_table = get_kernel_page_table();
        let preempt_guard = disable_preempt::<A>();

        let (mut cursor, Tracked(cursor_owner)) =
            (#[verus_spec(with Tracked(owner.pt_owner), Tracked(guard_perm), Tracked(regions), Tracked(guards))]
            page_table.cursor_mut(preempt_guard, &cursor_range)).unwrap();

        let ghost init_frames_len = frames.len();

        // Each to-be-mapped frame's metadata slot is not yet in the page
        // table: its level is 1, so its slot has no `path_if_in_pt`.
        assert forall |j: int| #![trigger entry_owners[j]]
            0 <= j < frames@.len()
        implies regions.slot_owners[frame_to_index_spec(entry_owners[j].frame.unwrap().mapped_pa)].path_if_in_pt is None
        by {
            let item = MappedItem::Tracked(frame_as_dynframe(frames@[j]), prop);
            KernelPtConfig::item_into_raw_spec_tracked_level(item);
            let (pa, level, _) = KernelPtConfig::item_into_raw_spec(item);
            assert(level == 1u8);
            vstd::arithmetic::mul::lemma_mul_by_zero_is_zero(
                nr_subpage_per_huge::<PagingConsts>().ilog2() as int);
            vstd::arithmetic::power::lemma_pow0(2int);
        };

        for frame in it: frames.into_iter()
            invariant
                cursor.inner.invariants(cursor_owner, *regions, *guards),
                // NOTE(review): the bound variable `entry` is unused in the
                // consequent; this forall degenerates to
                // `entry_owners non-empty ==> map_cursor_requires(..)`.
                // Confirm whether a per-entry condition was intended.
                forall |entry: EntryOwner<KernelPtConfig>| entry_owners.contains(entry) ==>
                    cursor.map_cursor_requires(cursor_owner, *guards),
                forall |i: int| 0 <= i < entry_owners.len() ==> (#[trigger]entry_owners[i]).inv(),
                // Cursor stays page-aligned with room for the remaining frames.
                cursor.inner.va % PAGE_SIZE == 0,
                cursor.inner.va as int + entry_owners.len() as int * PAGE_SIZE as int
                    <= cursor.inner.barrier_va.end as int,
                cursor.inner.barrier_va.end % PAGE_SIZE == 0,
                // `entry_owners` shrinks in lock-step with the iterator.
                it.elements.len() == init_frames_len,
                init_frames_len <= old(entry_owners).len(),
                entry_owners.len() == old(entry_owners).len() - it.pos,
                forall |j: int|
                    0 <= j < entry_owners.len() && it.pos + j < it.elements.len() ==>
                        frame_entry_wf(it.elements[it.pos + j], prop, entry_owners[j]),
                // Remaining frames are still unmapped and pairwise distinct.
                forall |j: int| #![trigger entry_owners[j]]
                    0 <= j < it.elements.len() - it.pos ==>
                        regions.slot_owners[frame_to_index_spec(entry_owners[j].frame.unwrap().mapped_pa)].path_if_in_pt is None,
                forall |i: int, j: int| 0 <= i < j < it.elements.len() - it.pos ==>
                    (#[trigger]entry_owners[i]).frame.unwrap().mapped_pa != (#[trigger] entry_owners[j]).frame.unwrap().mapped_pa,
        {
            proof {
                // Instantiate the (degenerate) forall above at index 0; the
                // loop invariants guarantee `entry_owners` is non-empty here.
                assert(entry_owners.contains(entry_owners[0]));
                assert(cursor.map_cursor_requires(cursor_owner, *guards));
                assert(frame_entry_wf(frame, prop, entry_owners[0]));
            }

            let ghost cur_mapped_pa: usize = entry_owners[0].frame.unwrap().mapped_pa;
            let ghost cur_pa_from_wf: usize = KernelPtConfig::item_into_raw_spec(
                MappedItem::Tracked(frame_as_dynframe(it.elements.index(it.pos as int)), prop)).0;
            let ghost pre_remove_owners: Seq<EntryOwner<KernelPtConfig>> = *entry_owners;

            // Consume the owner for this frame from the front of the sequence.
            let tracked mut entry_owner = entry_owners.tracked_remove(0);

            let dynframe = frame_into_dynframe(frame);
            let item = MappedItem::Tracked(dynframe, prop);
            assert(cursor.inner.invariants(cursor_owner, *regions, *guards));

            let ghost regions_before_map = *regions;
            let ghost old_cursor_model: CursorView<KernelPtConfig> = cursor.inner.model(cursor_owner);
            let ghost old_cursor_owner_va = cursor_owner.va;
            proof {
                // Align the ghost cursor position with the exec cursor and
                // establish that the item is a level-1 (base page) mapping.
                cursor_owner.view_preserves_inv(); cursor_owner.va.reflect_prop(cursor.inner.va);
                let (pa, level, prop_from_item) = KernelPtConfig::item_into_raw_spec(item);
                KernelPtConfig::item_into_raw_spec_level_bounds(item);
                KernelPtConfig::item_into_raw_spec_tracked_level(item);
                lemma_va_align_page_size_level_1(cursor.inner.va);
                cursor_owner.locked_range_page_aligned();
                let ghost diff: int = cursor.inner.barrier_va.end as int - cursor.inner.va as int;
                vstd::arithmetic::mul::lemma_mul_by_zero_is_zero(
                    nr_subpage_per_huge::<PagingConsts>().ilog2() as int,
                );
                vstd::arithmetic::power::lemma_pow0(2int);

                KernelPtConfig::item_into_raw_spec_tracked_level(item);
            }

            // TODO(review): proof hole — membership of the item's metadata
            // slot in `regions` is admitted, not proved.
            assert(CursorMut::<'a, KernelPtConfig, A>::item_slot_in_regions(
                item, *regions,
            )) by { admit() };
            #[verus_spec(with Tracked(&mut cursor_owner), Tracked(entry_owner), Tracked(regions), Tracked(guards))]
            let res = cursor.map(item);

            proof {
                // Re-establish the loop invariants after the map: relate the
                // shrunken owner sequence, and show the cursor advanced by
                // exactly one (already-aligned) base page.
                pre_remove_owners.remove_ensures(0);
                let cur_idx = frame_to_index_spec(cur_mapped_pa);

                let (pa, level, prop_) = KernelPtConfig::item_into_raw_spec(item);
                KernelPtConfig::item_into_raw_spec_tracked_level(item);

                let split_self = old_cursor_model.split_while_huge(PAGE_SIZE);

                let aligned_nat: nat = vstd_extra::arithmetic::nat_align_up(
                    split_self.cur_va as nat, PAGE_SIZE as nat);

                vstd_extra::arithmetic::lemma_nat_align_up_sound(
                    split_self.cur_va as nat, PAGE_SIZE as nat);

                CursorView::<KernelPtConfig>::lemma_split_while_huge_preserves_cur_va(
                    old_cursor_model, PAGE_SIZE);

                assert(aligned_nat == vstd_extra::arithmetic::nat_align_up(
                    old_cursor_owner_va.to_vaddr() as nat, PAGE_SIZE as nat));
                old_cursor_owner_va.align_diff(1);

                // The old position was already page-aligned, so align_down
                // is the identity there.
                assert(vstd_extra::arithmetic::nat_align_down(
                    old_cursor_owner_va.to_vaddr() as nat, PAGE_SIZE as nat)
                    == old_cursor_owner_va.to_vaddr() as nat);

                cursor_owner.va.reflect_prop(cursor.inner.va);
            }
        }

        Self { range }
    }

    /// Allocates a fresh `area_size`-byte kernel virtual area and maps the
    /// untracked physical range `pa_range` starting at `map_offset` inside
    /// it, using the largest page sizes available, all with property `prop`.
    ///
    /// An empty `pa_range` allocates the area without mapping anything.
    ///
    /// # Safety
    /// The caller must ensure `pa_range` refers to untracked physical memory
    /// that is safe to map into kernel space with `prop`.
    #[verus_spec(
        with Tracked(owner): Tracked<KVirtAreaOwner>,
        Tracked(regions): Tracked<&mut MetaRegionOwners>,
        Tracked(guards): Tracked<&mut Guards<'a, KernelPtConfig>>,
        Tracked(guard_perm): Tracked<GuardPerm<'a, KernelPtConfig>>
    )]
    // NOTE(review): `'a` is declared after type parameter `A`; rustc requires
    // lifetimes before type parameters — confirm the Verus macro accepts (or
    // rewrites) this ordering.
    pub unsafe fn map_untracked_frames<A: InAtomicMode + 'a, 'a>(
        area_size: usize,
        map_offset: usize,
        pa_range: Range<Paddr>,
        prop: PageProperty,
    ) -> Self
        requires
            // Sizing and alignment of the request.
            0 < area_size,
            area_size <= usize::MAX / 2,
            map_offset % PAGE_SIZE == 0,
            map_offset < area_size,
            pa_range.end as nat - pa_range.start as nat <= area_size as nat - map_offset as nat,
            pa_range.end <= MAX_PADDR,
            pa_range.start % PAGE_SIZE == 0,
            pa_range.end % PAGE_SIZE == 0,
            // The physical range is not currently mapped anywhere.
            old(regions).paddr_range_not_mapped(pa_range),
    {
        proof {
            kvirt_alloc_succeeds(area_size);
        }

        let range = KVIRT_AREA_ALLOCATOR.alloc(area_size).unwrap();

        proof {
            kvirt_alloc_range_bounds(area_size, map_offset, range);
        }

        if pa_range.start < pa_range.end {
            let len = pa_range.end - pa_range.start;
            let va_range = range.start + map_offset..range.start + map_offset + len;

            proof {
                axiom_kernel_range_valid(va_range);
            }

            let page_table = get_kernel_page_table();
            let preempt_guard = disable_preempt::<A>();

            let ghost pre_cursor_regions: MetaRegionOwners = *regions;

            let (mut cursor, Tracked(cursor_owner)) =
                (#[verus_spec(with Tracked(owner.pt_owner), Tracked(guard_perm), Tracked(regions), Tracked(guards))]
                page_table.cursor_mut(preempt_guard, &va_range)).unwrap();

            // Greedy decomposition of the range into the largest pages.
            let pages = collect_largest_pages(va_range.start, pa_range.start, len);

            for (pa, level) in it: pages.into_iter()
                invariant
                    cursor.inner.invariants(cursor_owner, *regions, *guards),
                    // Facts carried over from `collect_largest_pages`.
                    forall |i: int| 0 <= i < it.elements.len() ==> (#[trigger] it.elements[i]).0 % PAGE_SIZE == 0,
                    forall |i: int| 0 <= i < it.elements.len() ==>
                        1 <= (#[trigger] it.elements[i]).1<= NR_LEVELS,
                    forall |i: int| 0 <= i < it.elements.len() ==>
                        (#[trigger] it.elements[i]).1 <= KernelPtConfig::HIGHEST_TRANSLATION_LEVEL(),
                    sum_page_sizes_spec(it.elements, 0, it.elements.len() as int) == len as nat,
                    forall |i: int| 0 <= i < it.elements.len() ==>
                        (va_range.start as nat + #[trigger] sum_page_sizes_spec(it.elements, 0, i))
                            % page_size(it.elements[i].1) as nat == 0,
                    // The cursor sits exactly at the end of what has been
                    // mapped so far.
                    cursor.inner.va as nat
                        == va_range.start as nat
                           + sum_page_sizes_spec(it.elements, 0, it.pos as int),
                    cursor.inner.barrier_va.end == va_range.start + len,
                    it.pos <= it.elements.len(),
                    it.pos < it.elements.len() ==>
                        cursor.map_cursor_requires(cursor_owner, *guards),
                    forall |i: int| #![auto] 0 <= i < it.elements.len() ==>
                        it.elements[i].0 as nat
                            == pa_range.start as nat + sum_page_sizes_spec(it.elements, 0, i),
                    // The not-yet-mapped physical suffix remains unmapped.
                    regions.paddr_range_not_mapped(
                        (pa_range.start as nat
                            + sum_page_sizes_spec(it.elements, 0, it.pos as int)) as usize
                        ..pa_range.end
                    ),
                    pa_range.end as nat == pa_range.start as nat + len as nat,
                    cursor.inner.guard_level == NR_LEVELS as u8,
                    pa_range.end <= MAX_PADDR,
            {
                let pos: Ghost<int> = Ghost(it.pos as int);

                proof {
                    // Partial-sum arithmetic for the current position.
                    sum_page_sizes_extend_right(it.elements, 0, pos@);
                    sum_page_sizes_mono(it.elements, 0, pos@ + 1, it.elements.len() as int);
                }

                let item = MappedItem::Untracked(pa, level, prop);
                proof_decl! {
                    // Fresh ghost owner for an untracked mapping; no frame
                    // metadata is consumed.
                    let tracked entry_owner =
                        EntryOwner::<KernelPtConfig>::new_untracked_frame(pa, level, prop);
                }

                let ghost old_cursor_model: CursorView<KernelPtConfig> =
                    cursor.inner.model(cursor_owner);
                let ghost old_cursor_owner_va = cursor_owner.va;
                let ghost old_va: nat = cursor.inner.va as nat;

                proof {
                    cursor_owner.view_preserves_inv(); cursor_owner.va.reflect_prop(cursor.inner.va);

                    KernelPtConfig::item_into_raw_spec_untracked(pa, level, prop);

                    sum_page_sizes_extend_right(it.elements, 0, pos@);
                    sum_page_sizes_mono(
                        it.elements, 0, pos@ + 1, it.elements.len() as int);
                }

                let ghost pre_map_regions: MetaRegionOwners = *regions;

                // TODO(review): proof hole — membership of the item's slot in
                // `regions` is admitted, not proved (same hole as in
                // `map_frames`).
                assert(CursorMut::<'a, KernelPtConfig, A>::item_slot_in_regions(
                    item, *regions,
                )) by { admit() };
                #[verus_spec(with Tracked(&mut cursor_owner), Tracked(entry_owner), Tracked(regions), Tracked(guards))]
                let _ = cursor.map(item);

                proof {
                    // Show the cursor advanced by exactly `page_size(level)`
                    // and re-establish the partial-sum invariants.
                    KernelPtConfig::item_into_raw_spec_untracked(pa, level, prop);
                    let level_raw = KernelPtConfig::item_into_raw_spec(item).1;

                    let split_self = old_cursor_model.split_while_huge(
                        page_size(level_raw));

                    CursorView::<KernelPtConfig>::lemma_split_while_huge_preserves_cur_va(
                        old_cursor_model, page_size(level_raw));

                    let aligned_nat = vstd_extra::arithmetic::nat_align_up(
                        old_cursor_owner_va.to_vaddr() as nat,
                        page_size(level_raw) as nat,
                    );

                    lemma_page_size_ge_page_size(level_raw);

                    vstd_extra::arithmetic::lemma_nat_align_up_sound(
                        old_cursor_owner_va.to_vaddr() as nat,
                        page_size(level_raw) as nat,
                    );

                    old_cursor_owner_va.align_diff(level_raw as int);

                    sum_page_sizes_extend_right(it.elements, 0, pos@);

                    // Next physical address = current + this page's size.
                    let pa_next_nat = pa_range.start as nat + sum_page_sizes_spec(it.elements, 0, pos@ + 1);
                    assert(pa_next_nat == pa as nat + page_size(level) as nat);

                    if pos@ + 1 < it.elements.len() as int {
                        // Alignment facts needed for the next iteration's
                        // `map_cursor_requires`.
                        let next_level = it.elements[pos@ + 1].1;
                        sum_page_sizes_extend_right(it.elements, 0, pos@ + 1);
                        sum_page_sizes_mono(it.elements, 0, pos@ + 2, it.elements.len() as int);
                        lemma_page_size_ge_page_size(next_level);
                    }
                }
            }
        }

        Self { range }
    }
}
728
729}