1use vstd::prelude::*;
4
5use vstd_extra::arithmetic::nat_align_down;
6use vstd_extra::ownership::{InvView, ModelOf, OwnerOf};
7use vstd_extra::prelude::Inv;
8
9use core::marker::PhantomData;
10use core::ops::Range;
11
12use super::{KERNEL_PAGE_TABLE, VMALLOC_VADDR_RANGE, KERNEL_BASE_VADDR, KERNEL_END_VADDR};
13use crate::{
14 mm::{
15 frame::{untyped::AnyUFrameMeta, Frame, Segment},
16 kspace::{KernelPtConfig, MappedItem},
17 largest_pages,
18 page_prop::PageProperty,
19 Paddr, Vaddr, PAGE_SIZE,
20 page_table::{
21 is_valid_range_spec, page_size,
22 Child, CursorMut, PageTable, PageTableConfig,
23 },
24 },
25};
26
27use crate::specs::arch::mm::{MAX_PADDR, PAGE_SIZE as SPEC_PAGE_SIZE, NR_LEVELS};
28use crate::mm::PagingConstsTrait;
29use crate::specs::arch::PagingConsts;
30use crate::mm::nr_subpage_per_huge;
31use crate::specs::task::InAtomicMode;
32use crate::specs::mm::page_table::cursor::{CursorOwner, CursorView};
33use crate::specs::mm::page_table::*;
34use crate::specs::mm::frame::meta_region_owners::MetaRegionOwners;
35use crate::mm::page_table::PageTableGuard;
36use crate::mm::frame::DynFrame;
37use crate::mm::kspace::AnyFrameMeta;
38use crate::specs::mm::frame::mapping::frame_to_index_spec;
39
40verus! {
43
/// Spec-level view of converting a typed `Frame<T>` into a type-erased
/// `DynFrame`: same raw pointer, metadata type erased via `PhantomData`.
/// This is the model that `frame_into_dynframe` is trusted to implement.
pub open spec fn frame_as_dynframe<T: AnyFrameMeta>(frame: Frame<T>) -> DynFrame {
    DynFrame { ptr: frame.ptr, _marker: PhantomData }
}
48
/// Exec-mode conversion from a typed untyped-frame handle to a `DynFrame`.
///
/// Trusted (`external_body`): the `ensures` clause ties the runtime
/// `frame.into()` conversion to the spec model `frame_as_dynframe`, i.e. the
/// conversion is assumed to be a plain pointer-preserving type erasure.
#[verifier::external_body]
fn frame_into_dynframe<T: AnyUFrameMeta>(frame: Frame<T>) -> (res: DynFrame)
    ensures
        res == frame_as_dynframe(frame),
{
    frame.into()
}
59
/// Well-formedness of mapping `frame` with properties `prop` against a given
/// page-table entry owner: the (frame, prop) pair is lowered to a raw
/// `(paddr, level, prop)` triple via `KernelPtConfig::item_into_raw_spec`, and
/// the resulting `Child::Frame` must satisfy `wf` w.r.t. `entry_owner`.
pub open spec fn frame_entry_wf<T: AnyFrameMeta>(
    frame: Frame<T>,
    prop: PageProperty,
    entry_owner: EntryOwner<KernelPtConfig>,
) -> bool {
    // Spec-level counterpart of `frame_as_dynframe`: same pointer, erased meta.
    let frame_mss = DynFrame { ptr: frame.ptr, _marker: PhantomData };
    let item = MappedItem::Tracked(frame_mss, prop);
    let (pa, level, prop_from_item) = KernelPtConfig::item_into_raw_spec(item);
    Child::Frame(pa, level, prop_from_item).wf(entry_owner)
}
71
/// Error returned when a kernel virtual-address range cannot be allocated.
#[derive(Debug)]
pub struct RangeAllocError;

/// Allocator handing out virtual-address ranges from a fixed window.
/// Currently a verification stub: its runtime behavior is unimplemented and
/// its results are modeled entirely by `kvirt_alloc_spec` and the axioms below.
pub struct RangeAllocator;
76
impl RangeAllocator {
    /// Creates an allocator for the given virtual-address window.
    ///
    /// Trusted stub (`external_body`): the range argument is currently
    /// ignored; the allocator is modeled purely through `kvirt_alloc_spec`.
    #[verifier::external_body]
    pub const fn new(_r: core::ops::Range<Vaddr>) -> Self {
        Self
    }

    /// Allocates a virtual-address range of at least `size` bytes.
    ///
    /// Trusted stub: unimplemented at runtime. The `ensures` clause equates
    /// the result with the uninterpreted `kvirt_alloc_spec(size)`; see
    /// `kvirt_alloc_succeeds` / `kvirt_alloc_range_bounds` for the properties
    /// axiomatized about that spec.
    #[verifier::external_body]
    pub fn alloc(&self, size: usize) -> (r: Result<core::ops::Range<Vaddr>, RangeAllocError>)
        ensures
            r == kvirt_alloc_spec(size),
    {
        unimplemented!()
    }
}
91
/// Trusted stub returning a guard value witnessing atomic mode (preemption
/// disabled) for the lifetime `'a`. Unimplemented at runtime; used only so
/// callers can thread an `InAtomicMode` witness through verified code.
#[verifier::external_body]
pub fn disable_preempt<'a, G: InAtomicMode + 'a>() -> &'a G {
    unimplemented!()
}
96
// Global allocator for kernel virtual areas, covering the vmalloc window.
// `RangeAllocator::new` is a stub, so the window argument is presently unused
// at runtime; allocation results are governed by the `kvirt_alloc_*` axioms.
exec static KVIRT_AREA_ALLOCATOR: RangeAllocator = RangeAllocator::new(VMALLOC_VADDR_RANGE);
98
/// Sum of the page sizes of `elems[from..to)`, where each element is a
/// `(physical address, paging level)` pair and `page_size(level)` gives the
/// byte size of a page at that level. Defined by peeling elements from the
/// left; an empty (or inverted) range sums to 0.
pub open spec fn sum_page_sizes_spec(elems: Seq<(Paddr, u8)>, from: int, to: int) -> nat
    decreases (to - from) when from <= to
{
    if from >= to {
        0nat
    } else {
        page_size(elems[from].1) as nat
            + sum_page_sizes_spec(elems, from + 1, to)
    }
}
110
/// Right-peeling lemma: the sum over `[from, to + 1)` equals the sum over
/// `[from, to)` plus the page size of `elems[to]`. Since the definition peels
/// from the left, this fact needs an induction on `to - from`.
proof fn sum_page_sizes_extend_right(elems: Seq<(Paddr, u8)>, from: int, to: int)
    requires
        0 <= from <= to,
        to < elems.len() as int,
    ensures
        sum_page_sizes_spec(elems, from, to + 1)
            == sum_page_sizes_spec(elems, from, to)
            + page_size(elems[to].1) as nat,
    decreases to - from,
{
    if from < to {
        // Inductive step: apply the lemma to the tail, then unfold one
        // left-peel of each sum so the induction hypothesis lines up.
        sum_page_sizes_extend_right(elems, from + 1, to);
        assert(sum_page_sizes_spec(elems, from, to) ==
            page_size(elems[from].1) as nat
            + sum_page_sizes_spec(elems, from + 1, to));
        assert(sum_page_sizes_spec(elems, from, to + 1) ==
            page_size(elems[from].1) as nat
            + sum_page_sizes_spec(elems, from + 1, to + 1));
    } else {
        // Base case `from == to`: the `[from, to)` sum is empty, and the
        // `[from, to + 1)` sum is exactly the single element at `from`.
        assert(sum_page_sizes_spec(elems, from, to) == 0nat);
        assert(sum_page_sizes_spec(elems, from, to + 1) ==
            page_size(elems[from].1) as nat
            + sum_page_sizes_spec(elems, from + 1, to + 1));
        assert(sum_page_sizes_spec(elems, from + 1, to + 1) == 0nat);
    }
}
140
/// Monotonicity in the upper bound: extending the summed range to the right
/// can only increase the sum (page sizes are `nat`, hence non-negative).
/// Proven by shrinking `to2` down to `to1` one element at a time, using the
/// right-peeling lemma at each step.
proof fn sum_page_sizes_mono(elems: Seq<(Paddr, u8)>, from: int, to1: int, to2: int)
    requires
        0 <= from <= to1 <= to2,
        to2 <= elems.len() as int,
    ensures
        sum_page_sizes_spec(elems, from, to1) <= sum_page_sizes_spec(elems, from, to2),
    decreases to2 - to1,
{
    if to1 < to2 {
        sum_page_sizes_extend_right(elems, from, to2 - 1);
        sum_page_sizes_mono(elems, from, to1, to2 - 1);
    }
}
155
/// Splits the mapping `[va, va + len)` -> `[pa, pa + len)` into the largest
/// pages the kernel page table can use, returning `(paddr, level)` chunks.
///
/// Trusted wrapper (`external_body`) around `largest_pages`; the
/// postconditions assumed about the result are:
/// - every chunk's physical address is `PAGE_SIZE`-aligned;
/// - every chunk level is in `1..=NR_LEVELS` and at most
///   `KernelPtConfig::HIGHEST_TRANSLATION_LEVEL()`;
/// - the chunk sizes sum exactly to `len`;
/// - each chunk's virtual address (`va` plus the prefix sum of preceding
///   chunk sizes) is aligned to that chunk's own page size;
/// - each chunk's physical address equals `pa` plus the same prefix sum
///   (i.e. the chunks tile the physical range contiguously, in order).
#[verifier::external_body]
fn collect_largest_pages(
    va: Vaddr,
    pa: Paddr,
    len: usize,
) -> (res: alloc::vec::Vec<(Paddr, u8)>)
    ensures
        forall |i: int| 0 <= i < res@.len() ==> (#[trigger] res@[i]).0 % PAGE_SIZE == 0,
        forall |i: int| 0 <= i < res@.len() ==> 1 <= (#[trigger] res@[i]).1 <= NR_LEVELS,
        forall |i: int| 0 <= i < res@.len() ==>
            (#[trigger] res@[i]).1 <= KernelPtConfig::HIGHEST_TRANSLATION_LEVEL(),
        sum_page_sizes_spec(res@, 0, res@.len() as int) == len as nat,
        forall |i: int| 0 <= i < res@.len() ==>
            (va as nat + #[trigger] sum_page_sizes_spec(res@, 0, i))
                % page_size(res@[i].1) as nat == 0,
        forall |i: int| 0 <= i < res@.len() ==>
            (#[trigger] res@[i]).0 as nat == pa as nat + sum_page_sizes_spec(res@, 0, i),
{
    largest_pages::<KernelPtConfig>(va, pa, len).collect()
}
185
/// Trusted accessor for the global kernel page table.
///
/// Returns `KERNEL_PAGE_TABLE` (panicking via `unwrap` if it has not been
/// initialized — TODO(review): confirm initialization always precedes callers)
/// and, through the ghost out-parameter `kernel_owner`, a borrowed owner for
/// it. The assumed postconditions: the owner is present and satisfies its
/// invariant, its root node exists and its metadata address matches the
/// returned table's root pointer, creating a user page table from that root
/// would not panic, the owner is sound w.r.t. `regions`, and the root is not
/// currently held locked in `guards_k`.
#[verifier::external_body]
pub(crate) fn get_kernel_page_table<'rcu>(
    Tracked(kernel_owner): Tracked<&mut Option<&PageTableOwner<KernelPtConfig>>>,
    Tracked(regions): Tracked<&MetaRegionOwners>,
    Tracked(guards_k): Tracked<&Guards<'rcu, KernelPtConfig>>,
) -> (r: &'static PageTable<KernelPtConfig>)
    requires
        regions.inv(),
    ensures
        final(kernel_owner)@ is Some,
        final(kernel_owner)@.unwrap().inv(),
        final(kernel_owner)@.unwrap().0.value.node is Some,
        r.root.ptr.addr() == final(kernel_owner)@.unwrap().0.value.node.unwrap().meta_perm.addr(),
        !PageTable::<KernelPtConfig>::create_user_pt_panic_condition(
            final(kernel_owner)@.unwrap().0.value.node.unwrap(),
        ),
        final(kernel_owner)@.unwrap().0.value.metaregion_sound(*regions),
        final(kernel_owner)@.unwrap().metaregion_sound(*regions),
        guards_k.unlocked(
            final(kernel_owner)@.unwrap().0.value.node.unwrap().meta_perm.addr()),
{
    KERNEL_PAGE_TABLE.get().unwrap()
}
209
/// Uninterpreted model of the virtual-area allocator's result for a request of
/// `size` bytes; constrained only by the `kvirt_alloc_*` axioms below.
pub uninterp spec fn kvirt_alloc_spec(size: usize) -> Result<core::ops::Range<Vaddr>, RangeAllocError>;
212
/// Trusted assumption: allocation never fails for a positive request of at
/// most `usize::MAX / 2` bytes. NOTE(review): this abstracts away allocator
/// exhaustion — a real allocator can run out of address space.
pub axiom fn kvirt_alloc_succeeds(area_size: usize)
    requires
        0 < area_size <= usize::MAX / 2,
    ensures
        kvirt_alloc_spec(area_size).is_ok();
218
/// Trusted assumption about any successfully allocated range: it is well
/// ordered, at least `area_size` long, large enough to contain `map_offset`
/// without overflowing `usize`, page-aligned at both ends, and lies entirely
/// within the kernel address space. (`map_offset` is universally quantified
/// here so callers can instantiate the offset they care about.)
pub axiom fn kvirt_alloc_range_bounds(area_size: usize, map_offset: usize, r: core::ops::Range<Vaddr>)
    ensures
        kvirt_alloc_spec(area_size) == Ok::<core::ops::Range<Vaddr>, RangeAllocError>(r)
            ==> r.start <= r.end
            && (r.end - r.start) >= area_size
            && map_offset <= r.end - r.start
            && r.start + map_offset <= usize::MAX
            && r.start % PAGE_SIZE == 0
            && r.end % PAGE_SIZE == 0
            && KERNEL_BASE_VADDR <= r.start
            && r.end <= KERNEL_END_VADDR
;
231
/// Trusted assumption: any non-empty, page-aligned range lying inside the
/// kernel address space (`KERNEL_BASE_VADDR..KERNEL_END_VADDR`) is a valid
/// range for the kernel page-table configuration.
pub axiom fn axiom_kernel_range_valid(r: core::ops::Range<Vaddr>)
    ensures
        (KERNEL_BASE_VADDR <= r.start
            && r.end <= KERNEL_END_VADDR
            && r.start < r.end
            && r.start % PAGE_SIZE == 0
            && r.end % PAGE_SIZE == 0)
            ==> is_valid_range_spec::<KernelPtConfig>(&r);
242
/// A contiguous kernel virtual memory area.
#[derive(Debug)]
pub struct KVirtArea {
    /// The virtual-address range covered by this area.
    pub range: Range<Vaddr>,
}

/// Ghost ownership for a `KVirtArea`: carries the kernel page-table ownership
/// used to reason about mappings inside the area.
pub tracked struct KVirtAreaOwner {
    pub pt_owner: PageTableOwner<KernelPtConfig>,
}
264
impl KVirtAreaOwner {
    /// Abstract cursor view positioned at `addr` rounded down to a page
    /// boundary, exposing all mappings reachable from the owned page-table
    /// path. Used to phrase `KVirtArea::query`'s postconditions.
    pub closed spec fn cursor_view_at(self, addr: Vaddr) -> CursorView<KernelPtConfig> {
        CursorView {
            cur_va: nat_align_down(addr as nat, SPEC_PAGE_SIZE as nat) as Vaddr,
            mappings: self.pt_owner.view_rec(self.pt_owner.0.value.path),
            phantom: PhantomData,
        }
    }
}
276
impl Inv for KVirtArea {
    /// Invariant: the area lies entirely within the kernel address space.
    /// (Emptiness/alignment are stated per-method where needed, not here.)
    open spec fn inv(self) -> bool {
        &&& KERNEL_BASE_VADDR <= self.range.start
        &&& self.range.end <= KERNEL_END_VADDR
    }
}
283
284#[verus_verify]
285impl KVirtArea {
286
287 pub fn start(&self) -> Vaddr {
288 self.range.start
289 }
290
291 pub fn end(&self) -> Vaddr {
292 self.range.end
293 }
294
295 pub fn range(&self) -> Range<Vaddr> {
296 self.range.start..self.range.end
297 }
298
299 #[cfg(ktest)]
300 pub fn len(&self) -> usize {
301 self.range.len()
302 }
303
304 pub open spec fn query_some_condition(self, owner: KVirtAreaOwner, addr: Vaddr) -> bool {
306 owner.cursor_view_at(addr).present()
307 }
308
309 pub open spec fn query_some_ensures(
314 self,
315 owner: KVirtAreaOwner,
316 addr: Vaddr,
317 r: Option<super::MappedItem>,
318 ) -> bool {
319 r is Some && owner.cursor_view_at(addr).query_item_spec(r.unwrap()) is Some
320 }
321
322 pub open spec fn query_none_ensures(r: Option<super::MappedItem>) -> bool {
324 r is None
325 }
326
327 #[verus_spec(r =>
339 with Tracked(owner): Tracked<KVirtAreaOwner>,
340 Tracked(regions): Tracked<&mut MetaRegionOwners>,
341 Tracked(guards): Tracked<&mut Guards<'static, KernelPtConfig>>,
342 Tracked(guard_perm): Tracked<GuardPerm<'static, KernelPtConfig>>
343 requires
344 self.inv(),
345 self.range.start <= addr && addr < self.range.end,
346 self.range.start % PAGE_SIZE == 0,
347 self.range.end % PAGE_SIZE == 0,
348 old(regions).inv(),
349 owner.pt_owner.inv(),
350 forall |i: usize|
351 #![trigger old(regions).slot_owners[i]]
352 old(regions).slot_owners.contains_key(i)
353 && old(regions).slot_owners[i].inner_perms.ref_count.value()
354 != crate::specs::mm::frame::meta_owners::REF_COUNT_UNUSED
355 ==> old(regions).slot_owners[i].inner_perms.ref_count.value() + 1
356 < crate::specs::mm::frame::meta_owners::REF_COUNT_MAX,
357 ensures
358 self.query_some_condition(owner, addr) ==> self.query_some_ensures(owner, addr, r),
359 !self.query_some_condition(owner, addr) ==> Self::query_none_ensures(r),
360 )]
361 pub fn query<A: InAtomicMode + 'static>(&self, addr: Vaddr) -> Option<super::MappedItem>
362 {
363 use align_ext::AlignExt;
364
365 proof {
366 vstd_extra::prelude::lemma_pow2_is_pow2_to64();
367 broadcast use vstd::arithmetic::power2::is_pow2_equiv, vstd::arithmetic::power2::lemma_pow2;
368 let witness: nat = choose |i: nat| vstd::arithmetic::power::pow(2, i) == PAGE_SIZE as int;
369 assert(vstd::arithmetic::power2::pow2(witness) == PAGE_SIZE);
370 }
371 let start = addr.align_down(PAGE_SIZE);
372 proof {
373 assert(start <= addr);
374 assert(addr < self.range.end);
375 assert(self.range.end <= KERNEL_END_VADDR);
376 assert(KERNEL_END_VADDR + PAGE_SIZE <= usize::MAX) by (compute_only);
377 assert(start >= self.range.start);
379 assert(start + PAGE_SIZE <= self.range.end);
381 }
382 let vaddr = start..start + PAGE_SIZE;
383 proof {
384 axiom_kernel_range_valid(vaddr);
385 }
386 let page_table = {
387 proof_decl! { let tracked mut _kpt_owner: Option<&PageTableOwner<KernelPtConfig>> = None; }
388 get_kernel_page_table(Tracked(&mut _kpt_owner), Tracked(regions), Tracked(guards))
389 };
390 let preempt_guard = disable_preempt::<A>();
391 let (mut cursor, Tracked(mut cursor_owner)) =
392 (#[verus_spec(with Tracked(owner.pt_owner), Tracked(guard_perm), Tracked(regions), Tracked(guards))]
393 page_table.cursor(preempt_guard, &vaddr)).unwrap();
394 proof {
395 cursor_owner.as_page_table_owner_preserves_view_mappings();
401 assert(cursor_owner.view_mappings()
402 == owner.pt_owner.view_rec(owner.pt_owner.0.value.path));
403 cursor_owner.va.reflect_prop(cursor.va);
406 assert(cursor_owner.cur_va() == start);
407 }
408 let ghost pre_query_view = cursor_owner@;
409 let ghost pre_query_cursor_va = cursor.va;
410 let state = (#[verus_spec(with Tracked(&mut cursor_owner), Tracked(regions), Tracked(guards))]
411 cursor.query()).unwrap();
412 proof {
413 assert(cursor.va == pre_query_cursor_va);
417 assert(cursor_owner@.mappings == pre_query_view.mappings);
418 cursor_owner.va.reflect_prop(cursor.va);
419 assert(cursor_owner@.cur_va == start);
420 assert(cursor_owner@.mappings == owner.cursor_view_at(addr).mappings);
421 assert(cursor_owner@.cur_va == owner.cursor_view_at(addr).cur_va);
422 }
423 state.1
424 }
425
426 #[verus_spec(
438 with Tracked(owner): Tracked<KVirtAreaOwner>,
439 Tracked(entry_owners): Tracked<&mut Seq<EntryOwner<KernelPtConfig>>>,
440 Tracked(regions): Tracked<&mut MetaRegionOwners>,
441 Tracked(guards): Tracked<&mut Guards<'a, KernelPtConfig>>,
442 Tracked(guard_perm): Tracked<GuardPerm<'a, KernelPtConfig>>
443 )]
444 pub fn map_frames<'a, A: InAtomicMode + 'a>(
445 area_size: usize,
446 map_offset: usize,
447 frames: alloc::vec::Vec<DynFrame>,
448 prop: PageProperty,
449 ) -> Self
450 requires
451 old(regions).inv(),
452 0 < area_size,
453 map_offset < area_size,
454 map_offset % PAGE_SIZE == 0,
455 area_size <= usize::MAX / 2,
456 old(entry_owners).len() == frames.len(),
457 frames.len() as int * PAGE_SIZE as int + map_offset as int <= area_size as int,
458 forall |i: int| 0 <= i < old(entry_owners).len() ==> (#[trigger] old(entry_owners)[i]).inv(),
459 forall |i: int| 0 <= i < frames.len() ==>
460 frame_entry_wf(frames[i], prop, #[trigger] old(entry_owners)[i]),
461 forall |i: int, j: int| 0 <= i < j < frames.len() ==>
463 (#[trigger] old(entry_owners)[i]).frame.unwrap().mapped_pa != (#[trigger] old(entry_owners)[j]).frame.unwrap().mapped_pa,
464 {
465 #[cfg(feature = "allow_panic")]
466 assert!(area_size % PAGE_SIZE == 0);
467 #[cfg(feature = "allow_panic")]
468 assert!(map_offset % PAGE_SIZE == 0);
469
470 proof {
471 kvirt_alloc_succeeds(area_size);
472 }
473
474 let range = KVIRT_AREA_ALLOCATOR.alloc(area_size).unwrap();
475
476 proof {
477 kvirt_alloc_range_bounds(area_size, map_offset, range);
478 }
479
480 let cursor_range = range.start + map_offset..range.end;
481
482 proof {
483 axiom_kernel_range_valid(cursor_range);
484 }
485
486 let page_table = {
487 proof_decl! {
488 let tracked mut _kpt_owner: Option<&PageTableOwner<KernelPtConfig>> = None;
489 }
490 get_kernel_page_table(Tracked(&mut _kpt_owner), Tracked(regions), Tracked(guards))
491 };
492 let preempt_guard = disable_preempt::<A>();
493
494 let (mut cursor, Tracked(cursor_owner)) =
495 (#[verus_spec(with Tracked(owner.pt_owner), Tracked(guard_perm), Tracked(regions), Tracked(guards))]
496 page_table.cursor_mut(preempt_guard, &cursor_range)).unwrap();
497
498 let ghost init_frames_len = frames.len();
499
500 for frame in it: frames.into_iter()
501 invariant
502 cursor.inner.invariants(cursor_owner, *regions, *guards),
503 forall |i: int| 0 <= i < entry_owners.len() ==> (#[trigger]entry_owners[i]).inv(),
504 cursor.inner.va % PAGE_SIZE == 0,
505 cursor.inner.va as int + entry_owners.len() as int * PAGE_SIZE as int
506 <= cursor.inner.barrier_va.end as int,
507 cursor.inner.barrier_va.end % PAGE_SIZE == 0,
508 it.elements.len() == init_frames_len,
509 init_frames_len <= old(entry_owners).len(),
510 entry_owners.len() == old(entry_owners).len() - it.pos,
511 forall |j: int|
512 0 <= j < entry_owners.len() && it.pos + j < it.elements.len() ==>
513 frame_entry_wf(it.elements[it.pos + j], prop, entry_owners[j]),
514 forall |i: int, j: int| 0 <= i < j < it.elements.len() - it.pos ==>
516 (#[trigger]entry_owners[i]).frame.unwrap().mapped_pa != (#[trigger] entry_owners[j]).frame.unwrap().mapped_pa,
517 {
518 proof {
519 assert(entry_owners.contains(entry_owners[0]));
520 assert(frame_entry_wf(frame, prop, entry_owners[0]));
521 }
522
523 let ghost cur_mapped_pa: usize = entry_owners[0].frame.unwrap().mapped_pa;
524 let ghost cur_pa_from_wf: usize = KernelPtConfig::item_into_raw_spec(
525 MappedItem::Tracked(frame_as_dynframe(it.elements.index(it.pos as int)), prop)).0;
526 let ghost pre_remove_owners: Seq<EntryOwner<KernelPtConfig>> = *entry_owners;
527
528 let tracked mut entry_owner = entry_owners.tracked_remove(0);
529
530 let item = MappedItem::Tracked(frame, prop);
532 assert(cursor.inner.invariants(cursor_owner, *regions, *guards));
533
534 let ghost regions_before_map = *regions;
535 let ghost old_cursor_model: CursorView<KernelPtConfig> = cursor.inner.model(cursor_owner);
536 let ghost old_cursor_owner_va = cursor_owner.va;
537 proof {
538 cursor_owner.view_preserves_inv(); cursor_owner.va.reflect_prop(cursor.inner.va);
540 let (pa, level, prop_from_item) = KernelPtConfig::item_into_raw_spec(item);
541 KernelPtConfig::item_into_raw_spec_level_bounds(item);
542 KernelPtConfig::item_into_raw_spec_tracked_level(item);
543 lemma_va_align_page_size_level_1(cursor.inner.va);
544 cursor_owner.locked_range_page_aligned();
545 let ghost diff: int = cursor.inner.barrier_va.end as int - cursor.inner.va as int;
546 vstd::arithmetic::mul::lemma_mul_by_zero_is_zero(
547 nr_subpage_per_huge::<PagingConsts>().ilog2() as int,
548 );
549 vstd::arithmetic::power::lemma_pow0(2int);
550
551 KernelPtConfig::item_into_raw_spec_tracked_level(item);
552 }
553
554 assert(CursorMut::<'a, KernelPtConfig, A>::item_slot_in_regions(
557 item, *regions,
558 )) by { admit() };
559 #[verus_spec(with Tracked(&mut cursor_owner), Tracked(entry_owner), Tracked(regions), Tracked(guards))]
560 let res = cursor.map(item);
561
562 proof {
563 pre_remove_owners.remove_ensures(0);
564 let cur_idx = frame_to_index_spec(cur_mapped_pa);
565
566 let (pa, level, prop_) = KernelPtConfig::item_into_raw_spec(item);
567 KernelPtConfig::item_into_raw_spec_tracked_level(item);
568
569 let split_self = old_cursor_model.split_while_huge(PAGE_SIZE);
570
571 let aligned_nat: nat = vstd_extra::arithmetic::nat_align_up(
572 split_self.cur_va as nat, PAGE_SIZE as nat);
573
574 vstd_extra::arithmetic::lemma_nat_align_up_sound(
575 split_self.cur_va as nat, PAGE_SIZE as nat);
576
577 CursorView::<KernelPtConfig>::lemma_split_while_huge_preserves_cur_va(
578 old_cursor_model, PAGE_SIZE);
579
580 assert(aligned_nat == vstd_extra::arithmetic::nat_align_up(
581 old_cursor_owner_va.to_vaddr() as nat, PAGE_SIZE as nat));
582 old_cursor_owner_va.align_diff(1);
583
584 assert(vstd_extra::arithmetic::nat_align_down(
585 old_cursor_owner_va.to_vaddr() as nat, PAGE_SIZE as nat)
586 == old_cursor_owner_va.to_vaddr() as nat);
587
588 cursor_owner.va.reflect_prop(cursor.inner.va);
589 }
590 }
591
592 Self { range }
593 }
594
595 #[verus_spec(
614 with Tracked(owner): Tracked<KVirtAreaOwner>,
615 Tracked(regions): Tracked<&mut MetaRegionOwners>,
616 Tracked(guards): Tracked<&mut Guards<'a, KernelPtConfig>>,
617 Tracked(guard_perm): Tracked<GuardPerm<'a, KernelPtConfig>>
618 )]
619 pub unsafe fn map_untracked_frames<A: InAtomicMode + 'a, 'a>(
620 area_size: usize,
621 map_offset: usize,
622 pa_range: Range<Paddr>,
623 prop: PageProperty,
624 ) -> Self
625 requires
626 old(regions).inv(),
627 0 < area_size,
628 area_size <= usize::MAX / 2,
629 map_offset % PAGE_SIZE == 0,
630 map_offset < area_size,
631 pa_range.end as nat - pa_range.start as nat <= area_size as nat - map_offset as nat,
633 pa_range.end <= MAX_PADDR,
635 pa_range.start % PAGE_SIZE == 0,
637 pa_range.end % PAGE_SIZE == 0,
638 old(regions).paddr_range_not_mapped(pa_range),
640 {
641 #[cfg(feature = "allow_panic")]
642 assert!(pa_range.start % PAGE_SIZE == 0);
643 #[cfg(feature = "allow_panic")]
644 assert!(pa_range.end % PAGE_SIZE == 0);
645 #[cfg(feature = "allow_panic")]
646 assert!(area_size % PAGE_SIZE == 0);
647 #[cfg(feature = "allow_panic")]
648 assert!(map_offset % PAGE_SIZE == 0);
649 #[cfg(feature = "allow_panic")]
650 assert!(map_offset + (pa_range.end - pa_range.start) <= area_size);
651
652 proof {
653 kvirt_alloc_succeeds(area_size);
654 }
655
656 let range = KVIRT_AREA_ALLOCATOR.alloc(area_size).unwrap();
657
658 proof {
659 kvirt_alloc_range_bounds(area_size, map_offset, range);
660 }
661
662 if pa_range.start < pa_range.end {
663 let len = pa_range.end - pa_range.start;
664 let va_range = range.start + map_offset..range.start + map_offset + len;
665
666 proof {
667 axiom_kernel_range_valid(va_range);
668}
670
671 let page_table = {
672 proof_decl! {
673 let tracked mut _kpt_owner: Option<&PageTableOwner<KernelPtConfig>> = None;
674 }
675 get_kernel_page_table(Tracked(&mut _kpt_owner), Tracked(regions), Tracked(guards))
676 };
677 let preempt_guard = disable_preempt::<A>();
678
679 let ghost pre_cursor_regions: MetaRegionOwners = *regions;
681
682 let (mut cursor, Tracked(cursor_owner)) =
683 (#[verus_spec(with Tracked(owner.pt_owner), Tracked(guard_perm), Tracked(regions), Tracked(guards))]
684 page_table.cursor_mut(preempt_guard, &va_range)).unwrap();
685
686 let pages = collect_largest_pages(va_range.start, pa_range.start, len);
687
688 for (pa, level) in it: pages.into_iter()
689 invariant
690 cursor.inner.invariants(cursor_owner, *regions, *guards),
691 forall |i: int| 0 <= i < it.elements.len() ==> (#[trigger] it.elements[i]).0 % PAGE_SIZE == 0,
692 forall |i: int| 0 <= i < it.elements.len() ==>
693 1 <= (#[trigger] it.elements[i]).1<= NR_LEVELS,
694 forall |i: int| 0 <= i < it.elements.len() ==>
695 (#[trigger] it.elements[i]).1 <= KernelPtConfig::HIGHEST_TRANSLATION_LEVEL(),
696 sum_page_sizes_spec(it.elements, 0, it.elements.len() as int) == len as nat,
697 forall |i: int| 0 <= i < it.elements.len() ==>
698 (va_range.start as nat + #[trigger] sum_page_sizes_spec(it.elements, 0, i))
699 % page_size(it.elements[i].1) as nat == 0,
700 cursor.inner.va as nat
702 == va_range.start as nat
703 + sum_page_sizes_spec(it.elements, 0, it.pos as int),
704 cursor.inner.barrier_va.end == va_range.start + len,
705 it.pos <= it.elements.len(),
706 forall |i: int| #![auto] 0 <= i < it.elements.len() ==>
708 it.elements[i].0 as nat
709 == pa_range.start as nat + sum_page_sizes_spec(it.elements, 0, i),
710 regions.paddr_range_not_mapped(
712 (pa_range.start as nat
713 + sum_page_sizes_spec(it.elements, 0, it.pos as int)) as usize
714 ..pa_range.end
715 ),
716 pa_range.end as nat == pa_range.start as nat + len as nat,
718 cursor.inner.guard_level == NR_LEVELS as u8,
720 pa_range.end <= MAX_PADDR,
722 {
723 let pos: Ghost<int> = Ghost(it.pos as int);
724
725 proof {
726 sum_page_sizes_extend_right(it.elements, 0, pos@);
727 sum_page_sizes_mono(it.elements, 0, pos@ + 1, it.elements.len() as int);
728 }
729
730 let item = MappedItem::Untracked(pa, level, prop);
731 proof {
732 lemma_page_size_ge_page_size(level);
733 assert(pa as nat + page_size(level) as nat <= pa_range.end as nat);
734 assert(pa < MAX_PADDR);
735 }
736 proof_decl! {
737 let tracked mut entry_owner =
738 EntryOwner::<KernelPtConfig>::new_untracked_frame(pa, level, prop);
739 entry_owner.in_scope = false;
740 }
741
742 let ghost old_cursor_model: CursorView<KernelPtConfig> =
743 cursor.inner.model(cursor_owner);
744 let ghost old_cursor_owner_va = cursor_owner.va;
745 let ghost old_va: nat = cursor.inner.va as nat;
746
747 proof {
748 cursor_owner.view_preserves_inv(); cursor_owner.va.reflect_prop(cursor.inner.va);
750
751 KernelPtConfig::item_into_raw_spec_untracked(pa, level, prop);
752
753 sum_page_sizes_extend_right(it.elements, 0, pos@);
755 sum_page_sizes_mono(
756 it.elements, 0, pos@ + 1, it.elements.len() as int);
757 }
758
759 let ghost pre_map_regions: MetaRegionOwners = *regions;
761
762 proof {
763 KernelPtConfig::axiom_kernel_htl_lt_nr_levels();
764 assert(!cursor.map_panic_conditions(item));
765 assert(cursor.item_wf(item, entry_owner));
766 }
767 assume(CursorMut::<'a, KernelPtConfig, A>::item_slot_in_regions(item, *regions));
769 #[verus_spec(with Tracked(&mut cursor_owner), Tracked(entry_owner), Tracked(regions), Tracked(guards))]
770 let _ = cursor.map(item);
771
772 proof {
774 KernelPtConfig::item_into_raw_spec_untracked(pa, level, prop);
775 let level_raw = KernelPtConfig::item_into_raw_spec(item).1;
776
777 let split_self = old_cursor_model.split_while_huge(
778 page_size(level_raw));
779
780 CursorView::<KernelPtConfig>::lemma_split_while_huge_preserves_cur_va(
781 old_cursor_model, page_size(level_raw));
782
783 let aligned_nat = vstd_extra::arithmetic::nat_align_up(
784 old_cursor_owner_va.to_vaddr() as nat,
785 page_size(level_raw) as nat,
786 );
787
788 lemma_page_size_ge_page_size(level_raw);
789
790 vstd_extra::arithmetic::lemma_nat_align_up_sound(
791 old_cursor_owner_va.to_vaddr() as nat,
792 page_size(level_raw) as nat,
793 );
794
795 old_cursor_owner_va.align_diff(level_raw as int);
796
797 sum_page_sizes_extend_right(it.elements, 0, pos@);
798
799 let pa_next_nat = pa_range.start as nat + sum_page_sizes_spec(it.elements, 0, pos@ + 1);
800 assert(pa_next_nat == pa as nat + page_size(level) as nat);
801
802 if pos@ + 1 < it.elements.len() as int {
803 let next_level = it.elements[pos@ + 1].1;
804 sum_page_sizes_extend_right(it.elements, 0, pos@ + 1);
805 sum_page_sizes_mono(it.elements, 0, pos@ + 2, it.elements.len() as int);
806 lemma_page_size_ge_page_size(next_level);
807 }
808 }
809 }
810 }
811
812 Self { range }
813 }
814}
815
816}