ostd/mm/kspace/kvirt_area.rs

// SPDX-License-Identifier: MPL-2.0
//! Kernel virtual memory allocation
use vstd::prelude::*;

use vstd_extra::arithmetic::nat_align_down;
use vstd_extra::ownership::{InvView, ModelOf, OwnerOf};
use vstd_extra::prelude::Inv;

use core::marker::PhantomData;
use core::ops::Range;

use super::{KERNEL_PAGE_TABLE, VMALLOC_VADDR_RANGE, KERNEL_BASE_VADDR, KERNEL_END_VADDR};
use crate::{
    mm::{
        frame::{untyped::AnyUFrameMeta, Frame, Segment},
        kspace::{KernelPtConfig, MappedItem},
        largest_pages,
        page_prop::PageProperty,
        Paddr, Vaddr, PAGE_SIZE,
        page_table::{
            is_valid_range_spec, page_size,
            Child, CursorMut, PageTable, PageTableConfig,
        },
    },
};

use crate::specs::arch::mm::{MAX_PADDR, PAGE_SIZE as SPEC_PAGE_SIZE, NR_LEVELS};
use crate::mm::PagingConstsTrait;
use crate::specs::arch::PagingConsts;
use crate::mm::nr_subpage_per_huge;
use crate::specs::task::InAtomicMode;
use crate::specs::mm::page_table::cursor::{CursorOwner, CursorView};
use crate::specs::mm::page_table::*;
use crate::specs::mm::frame::meta_region_owners::MetaRegionOwners;
use crate::mm::page_table::PageTableGuard;
use crate::mm::frame::DynFrame;
use crate::mm::kspace::AnyFrameMeta;
use crate::specs::mm::frame::mapping::frame_to_index_spec;

//static KVIRT_AREA_ALLOCATOR: RangeAllocator = RangeAllocator::new(VMALLOC_VADDR_RANGE);

verus! {

/// Spec representation of Frame<T> as DynFrame (used when the actual conversion is opaque).
pub open spec fn frame_as_dynframe<T: AnyFrameMeta>(frame: Frame<T>) -> DynFrame {
    DynFrame { ptr: frame.ptr, _marker: PhantomData }
}

/// Converts `Frame<T>` to `DynFrame`, with a spec postcondition connecting the result
/// to the spec function `frame_as_dynframe`. The `Into` impl uses `transmute`, so the
/// function is marked `external_body` — same trust boundary as the underlying conversion.
#[verifier::external_body]
fn frame_into_dynframe<T: AnyUFrameMeta>(frame: Frame<T>) -> (res: DynFrame)
    ensures
        res == frame_as_dynframe(frame),
{
    frame.into()
}

/// Spec function: the entry owner correctly matches the frame and property for mapping.
pub open spec fn frame_entry_wf<T: AnyFrameMeta>(
    frame: Frame<T>,
    prop: PageProperty,
    entry_owner: EntryOwner<KernelPtConfig>,
) -> bool {
    let frame_mss = DynFrame { ptr: frame.ptr, _marker: PhantomData };
    let item = MappedItem::Tracked(frame_mss, prop);
    let (pa, level, prop_from_item) = KernelPtConfig::item_into_raw_spec(item);
    Child::Frame(pa, level, prop_from_item).wf(entry_owner)
}

#[derive(Debug)]
pub struct RangeAllocError;

pub struct RangeAllocator;

impl RangeAllocator {
    #[verifier::external_body]
    pub const fn new(_r: core::ops::Range<Vaddr>) -> Self {
        Self
    }

    #[verifier::external_body]
    pub fn alloc(&self, size: usize) -> (r: Result<core::ops::Range<Vaddr>, RangeAllocError>)
        ensures
            r == kvirt_alloc_spec(size),
    {
        unimplemented!()
    }
}

#[verifier::external_body]
pub fn disable_preempt<'a, G: InAtomicMode + 'a>() -> &'a G {
    unimplemented!()
}

exec static KVIRT_AREA_ALLOCATOR: RangeAllocator = RangeAllocator::new(VMALLOC_VADDR_RANGE);

/// Total size (in bytes) of the pages `elems[from..to]`.
pub open spec fn sum_page_sizes_spec(elems: Seq<(Paddr, u8)>, from: int, to: int) -> nat
    decreases (to - from) when from <= to
{
    if from >= to {
        0nat
    } else {
        page_size(elems[from].1) as nat
            + sum_page_sizes_spec(elems, from + 1, to)
    }
}
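
// Illustrative unfolding (hypothetical values, assuming level 1 covers PAGE_SIZE
// bytes): for `s == seq![(0x0, 1u8), (0x1000, 1u8)]`,
//   sum_page_sizes_spec(s, 0, 2)
//     == page_size(1) + sum_page_sizes_spec(s, 1, 2)
//     == page_size(1) + (page_size(1) + sum_page_sizes_spec(s, 2, 2))
//     == 2 * PAGE_SIZE.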

/// `sum(from, to+1) == sum(from, to) + page_size(elems[to])`.
proof fn sum_page_sizes_extend_right(elems: Seq<(Paddr, u8)>, from: int, to: int)
    requires
        0 <= from <= to,
        to < elems.len() as int,
    ensures
        sum_page_sizes_spec(elems, from, to + 1)
            == sum_page_sizes_spec(elems, from, to)
                + page_size(elems[to].1) as nat,
    decreases to - from,
{
    if from < to {
        sum_page_sizes_extend_right(elems, from + 1, to);
        // Help Verus: unfold sum_page_sizes_spec(elems, from, to) and (from, to+1)
        assert(sum_page_sizes_spec(elems, from, to) ==
            page_size(elems[from].1) as nat
                + sum_page_sizes_spec(elems, from + 1, to));
        assert(sum_page_sizes_spec(elems, from, to + 1) ==
            page_size(elems[from].1) as nat
                + sum_page_sizes_spec(elems, from + 1, to + 1));
    } else {
        // from == to; explicitly unfold both sides
        assert(sum_page_sizes_spec(elems, from, to) == 0nat);
        assert(sum_page_sizes_spec(elems, from, to + 1) ==
            page_size(elems[from].1) as nat
                + sum_page_sizes_spec(elems, from + 1, to + 1));
        assert(sum_page_sizes_spec(elems, from + 1, to + 1) == 0nat);
    }
}

/// `sum(from, to1) <= sum(from, to2)` when `to1 <= to2`.
proof fn sum_page_sizes_mono(elems: Seq<(Paddr, u8)>, from: int, to1: int, to2: int)
    requires
        0 <= from <= to1 <= to2,
        to2 <= elems.len() as int,
    ensures
        sum_page_sizes_spec(elems, from, to1) <= sum_page_sizes_spec(elems, from, to2),
    decreases to2 - to1,
{
    if to1 < to2 {
        sum_page_sizes_extend_right(elems, from, to2 - 1);
        sum_page_sizes_mono(elems, from, to1, to2 - 1);
    }
}
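
/// Illustrative corollary (a minimal sketch, not used by the proofs below):
/// a one-element range sums to exactly that element's page size. It follows
/// from `sum_page_sizes_extend_right` at `to == from` plus the base case.
proof fn sum_page_sizes_singleton(elems: Seq<(Paddr, u8)>, i: int)
    requires
        0 <= i < elems.len() as int,
    ensures
        sum_page_sizes_spec(elems, i, i + 1) == page_size(elems[i].1) as nat,
{
    sum_page_sizes_extend_right(elems, i, i);
    assert(sum_page_sizes_spec(elems, i, i) == 0nat);
}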

/// Collects the output of `largest_pages` into a `Vec` so that Verus can iterate
/// over it with loop invariants (`impl Iterator` does not implement `ForLoopGhostIteratorNew`).
///
/// The postconditions reflect provable properties of every `(pa, level)` pair emitted by
/// `largest_pages`:
/// - PA alignment and level-validity follow from the loop guard in `largest_pages`.
/// - The sum of page sizes equals `len` (the iterator covers exactly [pa, pa+len)).
/// - At each step, the running VA is aligned to the current page size.
#[verifier::external_body]
fn collect_largest_pages(
    va: Vaddr,
    pa: Paddr,
    len: usize,
) -> (res: alloc::vec::Vec<(Paddr, u8)>)
    ensures
        forall |i: int| 0 <= i < res@.len() ==> (#[trigger] res@[i]).0 % PAGE_SIZE == 0,
        forall |i: int| 0 <= i < res@.len() ==> 1 <= (#[trigger] res@[i]).1 <= NR_LEVELS,
        forall |i: int| 0 <= i < res@.len() ==>
            (#[trigger] res@[i]).1 <= KernelPtConfig::HIGHEST_TRANSLATION_LEVEL(),
        sum_page_sizes_spec(res@, 0, res@.len() as int) == len as nat,
        forall |i: int| 0 <= i < res@.len() ==>
            (va as nat + #[trigger] sum_page_sizes_spec(res@, 0, i))
                % page_size(res@[i].1) as nat == 0,
        // PA tracking: each element's physical address equals pa + sum of preceding page sizes.
        forall |i: int| 0 <= i < res@.len() ==>
            (#[trigger] res@[i]).0 as nat == pa as nat + sum_page_sizes_spec(res@, 0, i),
{
    largest_pages::<KernelPtConfig>(va, pa, len).collect()
}
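
// Illustrative output (hypothetical addresses, assuming x86-64 page sizes where
// level 1 covers 4 KiB and level 2 covers 2 MiB): for `va` and `pa` both
// 2 MiB-aligned and `len == 0x201000`, `collect_largest_pages` would return
// `[(pa, 2), (pa + 0x200000, 1)]`, and indeed
// `sum_page_sizes_spec(res@, 0, 2) == 0x200000 + 0x1000 == len`.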

#[verifier::external_body]
pub(crate) fn get_kernel_page_table<'rcu>(
    Tracked(kernel_owner): Tracked<&mut Option<&PageTableOwner<KernelPtConfig>>>,
    Tracked(regions): Tracked<&MetaRegionOwners>,
    Tracked(guards_k): Tracked<&Guards<'rcu, KernelPtConfig>>,
) -> (r: &'static PageTable<KernelPtConfig>)
    requires
        regions.inv(),
    ensures
        final(kernel_owner)@ is Some,
        final(kernel_owner)@.unwrap().inv(),
        final(kernel_owner)@.unwrap().0.value.node is Some,
        r.root.ptr.addr() == final(kernel_owner)@.unwrap().0.value.node.unwrap().meta_perm.addr(),
        !PageTable::<KernelPtConfig>::create_user_pt_panic_condition(
            final(kernel_owner)@.unwrap().0.value.node.unwrap(),
        ),
        final(kernel_owner)@.unwrap().0.value.metaregion_sound(*regions),
        final(kernel_owner)@.unwrap().metaregion_sound(*regions),
        guards_k.unlocked(
            final(kernel_owner)@.unwrap().0.value.node.unwrap().meta_perm.addr()),
{
    KERNEL_PAGE_TABLE.get().unwrap()
}

// Axiomatized spec for `RangeAllocator::alloc`: proof code cannot read the exec static,
// so the allocator's result is modeled by an uninterpreted spec function.
pub uninterp spec fn kvirt_alloc_spec(size: usize) -> Result<core::ops::Range<Vaddr>, RangeAllocError>;

pub axiom fn kvirt_alloc_succeeds(area_size: usize)
    requires
        0 < area_size <= usize::MAX / 2,
    ensures
        kvirt_alloc_spec(area_size).is_ok();

pub axiom fn kvirt_alloc_range_bounds(area_size: usize, map_offset: usize, r: core::ops::Range<Vaddr>)
    requires
        // Without this bound the axiom would be inconsistent: an unconstrained
        // `map_offset` would force `r.end - r.start >= map_offset` for arbitrary values.
        map_offset <= area_size,
    ensures
        kvirt_alloc_spec(area_size) == Ok::<core::ops::Range<Vaddr>, RangeAllocError>(r)
        ==> r.start <= r.end
            && (r.end - r.start) >= area_size
            && map_offset <= r.end - r.start
            && r.start + map_offset <= usize::MAX
            && r.start % PAGE_SIZE == 0
            && r.end % PAGE_SIZE == 0
            && KERNEL_BASE_VADDR <= r.start
            && r.end <= KERNEL_END_VADDR
;

/// Page-aligned kernel ranges within [KERNEL_BASE_VADDR, KERNEL_END_VADDR] are valid for
/// KernelPtConfig (which uses sign-extended high-half addresses).
pub axiom fn axiom_kernel_range_valid(r: core::ops::Range<Vaddr>)
    ensures
        (KERNEL_BASE_VADDR <= r.start
            && r.end <= KERNEL_END_VADDR
            && r.start < r.end
            && r.start % PAGE_SIZE == 0
            && r.end % PAGE_SIZE == 0)
        ==> is_valid_range_spec::<KernelPtConfig>(&r);

/// Kernel virtual area.
///
/// A kernel virtual area manages a range of memory in [`VMALLOC_VADDR_RANGE`].
/// It can map a portion or the entirety of its virtual memory pages to
/// physical memory, whether tracked with metadata or not.
///
/// It is the caller's responsibility to ensure TLB coherence before using the
/// mapped virtual address on a certain CPU.
//
// FIXME: This caller-ensured design is very error-prone. A good option is to
// use a guard that pins the CPU and ensures TLB coherence while accessing the
// `KVirtArea`. However, `IoMem` needs some non-trivial refactoring to support
// being implemented on a `!Send` and `!Sync` guard.
#[derive(Debug)]
pub struct KVirtArea {
    pub range: Range<Vaddr>,
}

pub tracked struct KVirtAreaOwner {
    pub pt_owner: PageTableOwner<KernelPtConfig>,
}

impl KVirtAreaOwner {
    /// The [`CursorView`] at the page containing the given address, representing the kernel page
    /// table's abstract state for query purposes.
    pub closed spec fn cursor_view_at(self, addr: Vaddr) -> CursorView<KernelPtConfig> {
        CursorView {
            cur_va: nat_align_down(addr as nat, SPEC_PAGE_SIZE as nat) as Vaddr,
            mappings: self.pt_owner.view_rec(self.pt_owner.0.value.path),
            phantom: PhantomData,
        }
    }
}
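
// For example (assuming 4 KiB base pages), `cursor_view_at(0xffff_8000_0000_1234)`
// yields a view with `cur_va == 0xffff_8000_0000_1000`: the view is positioned at
// the start of the page containing the queried address, which is exactly where
// `KVirtArea::query` places its cursor via `align_down`.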

impl Inv for KVirtArea {
    open spec fn inv(self) -> bool {
        &&& KERNEL_BASE_VADDR <= self.range.start
        &&& self.range.end <= KERNEL_END_VADDR
    }
}

#[verus_verify]
impl KVirtArea {

    pub fn start(&self) -> Vaddr {
        self.range.start
    }

    pub fn end(&self) -> Vaddr {
        self.range.end
    }

    pub fn range(&self) -> Range<Vaddr> {
        self.range.start..self.range.end
    }

    #[cfg(ktest)]
    pub fn len(&self) -> usize {
        self.range.len()
    }

    /// Whether there is a mapped item at the page containing the address.
    pub open spec fn query_some_condition(self, owner: KVirtAreaOwner, addr: Vaddr) -> bool {
        owner.cursor_view_at(addr).present()
    }

    /// Postcondition when a mapping exists at the page.
    ///
    /// The returned item corresponds to the abstract mapping given by
    /// [`query_item_spec`](CursorView::query_item_spec).
    pub open spec fn query_some_ensures(
        self,
        owner: KVirtAreaOwner,
        addr: Vaddr,
        r: Option<super::MappedItem>,
    ) -> bool {
        r is Some && owner.cursor_view_at(addr).query_item_spec(r.unwrap()) is Some
    }

    /// Postcondition when no mapping exists at the page.
    pub open spec fn query_none_ensures(r: Option<super::MappedItem>) -> bool {
        r is None
    }

    /// Queries the mapping at the given virtual address.
    ///
    /// Returns the mapped item at the page containing `addr`, or `None` if there is no mapping.
    ///
    /// ## Preconditions
    /// - The kernel virtual area invariant holds ([`inv`](Inv::inv)).
    /// - The address is within the virtual area's range.
    ///
    /// ## Postconditions
    /// - If there is a mapped item at the page containing the address ([`query_some_condition`]),
    ///   it is returned ([`query_some_ensures`]).
    /// - If there is no mapping at that page, `None` is returned ([`query_none_ensures`]).
    #[verus_spec(r =>
        with Tracked(owner): Tracked<KVirtAreaOwner>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'static, KernelPtConfig>>,
            Tracked(guard_perm): Tracked<GuardPerm<'static, KernelPtConfig>>
        requires
            self.inv(),
            self.range.start <= addr && addr < self.range.end,
            self.range.start % PAGE_SIZE == 0,
            self.range.end % PAGE_SIZE == 0,
            old(regions).inv(),
            owner.pt_owner.inv(),
            forall |i: usize|
                #![trigger old(regions).slot_owners[i]]
                old(regions).slot_owners.contains_key(i)
                && old(regions).slot_owners[i].inner_perms.ref_count.value()
                    != crate::specs::mm::frame::meta_owners::REF_COUNT_UNUSED
                ==> old(regions).slot_owners[i].inner_perms.ref_count.value() + 1
                    < crate::specs::mm::frame::meta_owners::REF_COUNT_MAX,
        ensures
            self.query_some_condition(owner, addr) ==> self.query_some_ensures(owner, addr, r),
            !self.query_some_condition(owner, addr) ==> Self::query_none_ensures(r),
    )]
    pub fn query<A: InAtomicMode + 'static>(&self, addr: Vaddr) -> Option<super::MappedItem>
    {
        use align_ext::AlignExt;

        proof {
            vstd_extra::prelude::lemma_pow2_is_pow2_to64();
            broadcast use vstd::arithmetic::power2::is_pow2_equiv, vstd::arithmetic::power2::lemma_pow2;
            let witness: nat = choose |i: nat| vstd::arithmetic::power::pow(2, i) == PAGE_SIZE as int;
            assert(vstd::arithmetic::power2::pow2(witness) == PAGE_SIZE);
        }
        let start = addr.align_down(PAGE_SIZE);
        proof {
            assert(start <= addr);
            assert(addr < self.range.end);
            assert(self.range.end <= KERNEL_END_VADDR);
            assert(KERNEL_END_VADDR + PAGE_SIZE <= usize::MAX) by (compute_only);
            // start >= self.range.start: align_down's forall postcondition instantiated at self.range.start.
            assert(start >= self.range.start);
            // start + PAGE_SIZE <= self.range.end: self.range.end is page-aligned and addr < self.range.end.
            assert(start + PAGE_SIZE <= self.range.end);
        }
        let vaddr = start..start + PAGE_SIZE;
        proof {
            axiom_kernel_range_valid(vaddr);
        }
        let page_table = {
            proof_decl! { let tracked mut _kpt_owner: Option<&PageTableOwner<KernelPtConfig>> = None; }
            get_kernel_page_table(Tracked(&mut _kpt_owner), Tracked(regions), Tracked(guards))
        };
        let preempt_guard = disable_preempt::<A>();
        let (mut cursor, Tracked(mut cursor_owner)) =
            (#[verus_spec(with Tracked(owner.pt_owner), Tracked(guard_perm), Tracked(regions), Tracked(guards))]
                page_table.cursor(preempt_guard, &vaddr)).unwrap();
        proof {
            // Bridge `cursor_owner@.mappings` to `owner.cursor_view_at(addr).mappings`.
            // PageTable::cursor ensures `cursor_owner.as_page_table_owner() == owner.pt_owner`
            // and `cursor_owner.continuations[3].path() == owner.pt_owner.0.value.path`, and
            // `as_page_table_owner_preserves_view_mappings` turns the cursor's view into a
            // `view_rec` call on the owner at the root path.
            cursor_owner.as_page_table_owner_preserves_view_mappings();
            assert(cursor_owner.view_mappings()
                == owner.pt_owner.view_rec(owner.pt_owner.0.value.path));
            // cur_va agreement: `cursor.wf(cursor_owner)` gives `cursor_owner.va.reflect(cursor.va)`,
            // which `reflect_prop` converts into `cursor_owner.va.to_vaddr() == cursor.va`.
            cursor_owner.va.reflect_prop(cursor.va);
            assert(cursor_owner.cur_va() == start);
        }
        let ghost pre_query_view = cursor_owner@;
        let ghost pre_query_cursor_va = cursor.va;
        let state = (#[verus_spec(with Tracked(&mut cursor_owner), Tracked(regions), Tracked(guards))]
            cursor.query()).unwrap();
        proof {
            // `Cursor::query` preserves `self.va` (loop invariant + new ensures) and
            // `cursor_owner@.mappings`. With `cursor.wf(cursor_owner)` reestablished
            // by post-query invariants, we can recover `cursor_owner@.cur_va == start`.
            assert(cursor.va == pre_query_cursor_va);
            assert(cursor_owner@.mappings == pre_query_view.mappings);
            cursor_owner.va.reflect_prop(cursor.va);
            assert(cursor_owner@.cur_va == start);
            assert(cursor_owner@.mappings == owner.cursor_view_at(addr).mappings);
            assert(cursor_owner@.cur_va == owner.cursor_view_at(addr).cur_va);
        }
        state.1
    }
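
    // Hypothetical usage sketch (illustrative only; `MyAtomicGuard` and the ghost
    // arguments stand in for caller-provided state):
    //
    //     let item = #[verus_spec(with Tracked(owner), Tracked(&mut regions),
    //             Tracked(&mut guards), Tracked(guard_perm))]
    //         area.query::<MyAtomicGuard>(addr);
    //     match item {
    //         Some(MappedItem::Tracked(frame, prop)) => { /* metadata-tracked page */ }
    //         Some(MappedItem::Untracked(pa, level, prop)) => { /* raw physical range */ }
    //         None => { /* no mapping at this page */ }
    //     }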

    /// Creates a kernel virtual area and maps tracked pages into it.
    ///
    /// The created virtual area will have a size of `area_size`, and the pages
    /// will be mapped starting from `map_offset` in the area.
    ///
    /// # Panics
    ///
    /// This function panics if
    ///  - the area size is not a multiple of [`PAGE_SIZE`];
    ///  - the map offset is not aligned to [`PAGE_SIZE`];
    ///  - the map offset plus the size of the pages exceeds the area size.
    #[verus_spec(
        with Tracked(owner): Tracked<KVirtAreaOwner>,
            Tracked(entry_owners): Tracked<&mut Seq<EntryOwner<KernelPtConfig>>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'a, KernelPtConfig>>,
            Tracked(guard_perm): Tracked<GuardPerm<'a, KernelPtConfig>>
    )]
    pub fn map_frames<'a, A: InAtomicMode + 'a>(
        area_size: usize,
        map_offset: usize,
        frames: alloc::vec::Vec<DynFrame>,
        prop: PageProperty,
    ) -> Self
        requires
            old(regions).inv(),
            0 < area_size,
            map_offset < area_size,
            map_offset % PAGE_SIZE == 0,
            area_size <= usize::MAX / 2,
            old(entry_owners).len() == frames.len(),
            frames.len() as int * PAGE_SIZE as int + map_offset as int <= area_size as int,
            forall |i: int| 0 <= i < old(entry_owners).len() ==> (#[trigger] old(entry_owners)[i]).inv(),
            forall |i: int| 0 <= i < frames.len() ==>
                frame_entry_wf(frames[i], prop, #[trigger] old(entry_owners)[i]),
            // Frames have distinct physical addresses (follows from linearity of slot_perm ownership).
            forall |i: int, j: int| 0 <= i < j < frames.len() ==>
                (#[trigger] old(entry_owners)[i]).frame.unwrap().mapped_pa != (#[trigger] old(entry_owners)[j]).frame.unwrap().mapped_pa,
    {
        #[cfg(feature = "allow_panic")]
        assert!(area_size % PAGE_SIZE == 0);
        #[cfg(feature = "allow_panic")]
        assert!(map_offset % PAGE_SIZE == 0);

        proof {
            kvirt_alloc_succeeds(area_size);
        }

        let range = KVIRT_AREA_ALLOCATOR.alloc(area_size).unwrap();

        proof {
            kvirt_alloc_range_bounds(area_size, map_offset, range);
        }

        let cursor_range = range.start + map_offset..range.end;

        proof {
            axiom_kernel_range_valid(cursor_range);
        }

        let page_table = {
                proof_decl! {
                    let tracked mut _kpt_owner: Option<&PageTableOwner<KernelPtConfig>> = None;
                }
                get_kernel_page_table(Tracked(&mut _kpt_owner), Tracked(regions), Tracked(guards))
            };
        let preempt_guard = disable_preempt::<A>();

        let (mut cursor, Tracked(cursor_owner)) =
        (#[verus_spec(with Tracked(owner.pt_owner), Tracked(guard_perm), Tracked(regions), Tracked(guards))]
            page_table.cursor_mut(preempt_guard, &cursor_range)).unwrap();

        let ghost init_frames_len = frames.len();

        for frame in it: frames.into_iter()
            invariant
                cursor.inner.invariants(cursor_owner, *regions, *guards),
                forall |i: int| 0 <= i < entry_owners.len() ==> (#[trigger] entry_owners[i]).inv(),
                cursor.inner.va % PAGE_SIZE == 0,
                cursor.inner.va as int + entry_owners.len() as int * PAGE_SIZE as int
                    <= cursor.inner.barrier_va.end as int,
                cursor.inner.barrier_va.end % PAGE_SIZE == 0,
                it.elements.len() == init_frames_len,
                init_frames_len <= old(entry_owners).len(),
                entry_owners.len() == old(entry_owners).len() - it.pos,
                forall |j: int|
                    0 <= j < entry_owners.len() && it.pos + j < it.elements.len() ==>
                    frame_entry_wf(it.elements[it.pos + j], prop, entry_owners[j]),
                // Remaining frames have distinct physical addresses
                forall |i: int, j: int| 0 <= i < j < it.elements.len() - it.pos ==>
                    (#[trigger] entry_owners[i]).frame.unwrap().mapped_pa != (#[trigger] entry_owners[j]).frame.unwrap().mapped_pa,
        {
            proof {
                assert(entry_owners.contains(entry_owners[0]));
                assert(frame_entry_wf(frame, prop, entry_owners[0]));
            }

            let ghost cur_mapped_pa: usize = entry_owners[0].frame.unwrap().mapped_pa;
            let ghost cur_pa_from_wf: usize = KernelPtConfig::item_into_raw_spec(
                MappedItem::Tracked(frame_as_dynframe(it.elements.index(it.pos as int)), prop)).0;
            let ghost pre_remove_owners: Seq<EntryOwner<KernelPtConfig>> = *entry_owners;

            let tracked mut entry_owner = entry_owners.tracked_remove(0);

            // At this point `frame == it.elements[it.pos]`, so `item` below matches the
            // spec-level `MappedItem::Tracked(frame_as_dynframe(frame), prop)` used by `frame_entry_wf`.
            let item = MappedItem::Tracked(frame, prop);
            assert(cursor.inner.invariants(cursor_owner, *regions, *guards));

            let ghost regions_before_map = *regions;
            let ghost old_cursor_model: CursorView<KernelPtConfig> = cursor.inner.model(cursor_owner);
            let ghost old_cursor_owner_va = cursor_owner.va;
            proof {
                cursor_owner.view_preserves_inv(); // old_cursor_model.inv()
                cursor_owner.va.reflect_prop(cursor.inner.va);
                let (pa, level, prop_from_item) = KernelPtConfig::item_into_raw_spec(item);
                KernelPtConfig::item_into_raw_spec_level_bounds(item);
                KernelPtConfig::item_into_raw_spec_tracked_level(item);
                lemma_va_align_page_size_level_1(cursor.inner.va);
                cursor_owner.locked_range_page_aligned();
                let ghost diff: int = cursor.inner.barrier_va.end as int - cursor.inner.va as int;
                vstd::arithmetic::mul::lemma_mul_by_zero_is_zero(
                    nr_subpage_per_huge::<PagingConsts>().ilog2() as int,
                );
                vstd::arithmetic::power::lemma_pow0(2int);

                KernelPtConfig::item_into_raw_spec_tracked_level(item);
            }

            // SAFETY: The constructor of the `KVirtArea` has already ensured
            // that this mapping does not affect kernel's memory safety.
            assert(CursorMut::<'a, KernelPtConfig, A>::item_slot_in_regions(
                item, *regions,
            )) by { admit() };
            #[verus_spec(with Tracked(&mut cursor_owner), Tracked(entry_owner), Tracked(regions), Tracked(guards))]
            let res = cursor.map(item);

            proof {
                pre_remove_owners.remove_ensures(0);
                let cur_idx = frame_to_index_spec(cur_mapped_pa);

                let (pa, level, prop_) = KernelPtConfig::item_into_raw_spec(item);
                KernelPtConfig::item_into_raw_spec_tracked_level(item);

                let split_self = old_cursor_model.split_while_huge(PAGE_SIZE);

                let aligned_nat: nat = vstd_extra::arithmetic::nat_align_up(
                    split_self.cur_va as nat, PAGE_SIZE as nat);

                vstd_extra::arithmetic::lemma_nat_align_up_sound(
                    split_self.cur_va as nat, PAGE_SIZE as nat);

                CursorView::<KernelPtConfig>::lemma_split_while_huge_preserves_cur_va(
                    old_cursor_model, PAGE_SIZE);

                assert(aligned_nat == vstd_extra::arithmetic::nat_align_up(
                    old_cursor_owner_va.to_vaddr() as nat, PAGE_SIZE as nat));
                old_cursor_owner_va.align_diff(1);

                assert(vstd_extra::arithmetic::nat_align_down(
                    old_cursor_owner_va.to_vaddr() as nat, PAGE_SIZE as nat)
                    == old_cursor_owner_va.to_vaddr() as nat);

                cursor_owner.va.reflect_prop(cursor.inner.va);
            }
        }

        Self { range }
    }
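
    // Hypothetical usage sketch (illustrative only; `frames`, `prop`, and every
    // tracked ghost argument must come from the caller, and `MyAtomicGuard` stands
    // in for an `InAtomicMode` implementation):
    //
    //     let area = #[verus_spec(with Tracked(owner), Tracked(&mut entry_owners),
    //             Tracked(&mut regions), Tracked(&mut guards), Tracked(guard_perm))]
    //         KVirtArea::map_frames::<MyAtomicGuard>(
    //             4 * PAGE_SIZE, // area_size: four pages of virtual space
    //             PAGE_SIZE,     // map_offset: leave the first page unmapped
    //             frames,        // Vec<DynFrame>, one entry owner per frame
    //             prop,          // PageProperty shared by all mappings
    //         );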

    /// Creates a kernel virtual area and maps untracked frames into it.
    ///
    /// The created virtual area will have a size of `area_size`, and the
    /// physical addresses will be mapped starting from `map_offset` in
    /// the area.
    ///
    /// You can provide a `0..0` physical range to create a virtual area without
    /// mapping any physical memory.
    ///
    /// # Panics
    ///
    /// This function panics if
    ///  - the area size is not a multiple of [`PAGE_SIZE`];
    ///  - the map offset is not aligned to [`PAGE_SIZE`];
    ///  - the provided physical range is not aligned to [`PAGE_SIZE`];
    ///  - the map offset plus the length of the physical range exceeds the
    ///    area size;
    ///  - the provided physical range contains tracked physical addresses.
    #[verus_spec(
        with Tracked(owner): Tracked<KVirtAreaOwner>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'a, KernelPtConfig>>,
            Tracked(guard_perm): Tracked<GuardPerm<'a, KernelPtConfig>>
    )]
    pub unsafe fn map_untracked_frames<'a, A: InAtomicMode + 'a>(
        area_size: usize,
        map_offset: usize,
        pa_range: Range<Paddr>,
        prop: PageProperty,
    ) -> Self
        requires
            old(regions).inv(),
            0 < area_size,
            area_size <= usize::MAX / 2,
            map_offset % PAGE_SIZE == 0,
            map_offset < area_size,
            // The PA range must fit within [map_offset, area_size) of the virtual area.
            pa_range.end as nat - pa_range.start as nat <= area_size as nat - map_offset as nat,
            // Physical addresses must be within physical memory bounds.
            pa_range.end <= MAX_PADDR,
            // Physical range endpoints must be PAGE_SIZE-aligned (function panics otherwise).
            pa_range.start % PAGE_SIZE == 0,
            pa_range.end % PAGE_SIZE == 0,
            // The physical range must not already be mapped in any page table.
            old(regions).paddr_range_not_mapped(pa_range),
    {
        #[cfg(feature = "allow_panic")]
        assert!(pa_range.start % PAGE_SIZE == 0);
        #[cfg(feature = "allow_panic")]
        assert!(pa_range.end % PAGE_SIZE == 0);
        #[cfg(feature = "allow_panic")]
        assert!(area_size % PAGE_SIZE == 0);
        #[cfg(feature = "allow_panic")]
        assert!(map_offset % PAGE_SIZE == 0);
        #[cfg(feature = "allow_panic")]
        assert!(map_offset + (pa_range.end - pa_range.start) <= area_size);

        proof {
            kvirt_alloc_succeeds(area_size);
        }

        let range = KVIRT_AREA_ALLOCATOR.alloc(area_size).unwrap();

        proof {
            kvirt_alloc_range_bounds(area_size, map_offset, range);
        }

        if pa_range.start < pa_range.end {
            let len = pa_range.end - pa_range.start;
            let va_range = range.start + map_offset..range.start + map_offset + len;

            proof {
                axiom_kernel_range_valid(va_range);
//                assert(crate::mm::page_table::Cursor::<KernelPtConfig, A>::cursor_new_success_conditions(&va_range));
            }

            let page_table = {
                proof_decl! {
                    let tracked mut _kpt_owner: Option<&PageTableOwner<KernelPtConfig>> = None;
                }
                get_kernel_page_table(Tracked(&mut _kpt_owner), Tracked(regions), Tracked(guards))
            };
            let preempt_guard = disable_preempt::<A>();

            // Save regions state before cursor_mut so postcondition trigger can fire.
            let ghost pre_cursor_regions: MetaRegionOwners = *regions;

            let (mut cursor, Tracked(cursor_owner)) =
            (#[verus_spec(with Tracked(owner.pt_owner), Tracked(guard_perm), Tracked(regions), Tracked(guards))]
                page_table.cursor_mut(preempt_guard, &va_range)).unwrap();

            let pages = collect_largest_pages(va_range.start, pa_range.start, len);

            for (pa, level) in it: pages.into_iter()
                invariant
                    cursor.inner.invariants(cursor_owner, *regions, *guards),
                    forall |i: int| 0 <= i < it.elements.len() ==> (#[trigger] it.elements[i]).0 % PAGE_SIZE == 0,
                    forall |i: int| 0 <= i < it.elements.len() ==>
                        1 <= (#[trigger] it.elements[i]).1 <= NR_LEVELS,
                    forall |i: int| 0 <= i < it.elements.len() ==>
                        (#[trigger] it.elements[i]).1 <= KernelPtConfig::HIGHEST_TRANSLATION_LEVEL(),
                    sum_page_sizes_spec(it.elements, 0, it.elements.len() as int) == len as nat,
                    forall |i: int| 0 <= i < it.elements.len() ==>
                        (va_range.start as nat + #[trigger] sum_page_sizes_spec(it.elements, 0, i))
                            % page_size(it.elements[i].1) as nat == 0,
                    // VA tracking invariant: cursor has advanced past sum of processed pages.
                    cursor.inner.va as nat
                        == va_range.start as nat
                            + sum_page_sizes_spec(it.elements, 0, it.pos as int),
                    cursor.inner.barrier_va.end == va_range.start + len,
                    it.pos <= it.elements.len(),
                    // PA tracking: element[i].0 == pa_range.start + sum of preceding sizes.
                    forall |i: int| #![auto] 0 <= i < it.elements.len() ==>
                        it.elements[i].0 as nat
                            == pa_range.start as nat + sum_page_sizes_spec(it.elements, 0, i),
                    // Remaining PA range is not mapped.
                    regions.paddr_range_not_mapped(
                        (pa_range.start as nat
                            + sum_page_sizes_spec(it.elements, 0, it.pos as int)) as usize
                        ..pa_range.end
                    ),
                    // pa_range.end == pa_range.start + len in nat (constant throughout loop).
                    pa_range.end as nat == pa_range.start as nat + len as nat,
                    // guard_level == NR_LEVELS (from cursor_mut postcondition, preserved by map).
                    cursor.inner.guard_level == NR_LEVELS as u8,
                    // Physical address bound (function precondition; constant throughout loop).
                    pa_range.end <= MAX_PADDR,
            {
                let pos: Ghost<int> = Ghost(it.pos as int);

                proof {
                    sum_page_sizes_extend_right(it.elements, 0, pos@);
                    sum_page_sizes_mono(it.elements, 0, pos@ + 1, it.elements.len() as int);
                }

                let item = MappedItem::Untracked(pa, level, prop);
                proof {
                    lemma_page_size_ge_page_size(level);
                    assert(pa as nat + page_size(level) as nat <= pa_range.end as nat);
                    assert(pa < MAX_PADDR);
                }
                proof_decl! {
                    let tracked mut entry_owner =
                        EntryOwner::<KernelPtConfig>::new_untracked_frame(pa, level, prop);
                    entry_owner.in_scope = false;
                }

                let ghost old_cursor_model: CursorView<KernelPtConfig> =
                    cursor.inner.model(cursor_owner);
                let ghost old_cursor_owner_va = cursor_owner.va;
                let ghost old_va: nat = cursor.inner.va as nat;

                proof {
                    cursor_owner.view_preserves_inv(); // old_cursor_model.inv()
                    cursor_owner.va.reflect_prop(cursor.inner.va);

                    KernelPtConfig::item_into_raw_spec_untracked(pa, level, prop);

                    // pa_range.end == pa_range.start + len (in nat) — from loop invariant.
                    sum_page_sizes_extend_right(it.elements, 0, pos@);
                    sum_page_sizes_mono(
                        it.elements, 0, pos@ + 1, it.elements.len() as int);
                }

                // Save ghost copy of regions before map for post-map invariant maintenance.
                let ghost pre_map_regions: MetaRegionOwners = *regions;

                proof {
                    KernelPtConfig::axiom_kernel_htl_lt_nr_levels();
                    assert(!cursor.map_panic_conditions(item));
                    assert(cursor.item_wf(item, entry_owner));
                }
                // SAFETY: The caller of `map_untracked_frames` has ensured the safety of this mapping.
                assume(CursorMut::<'a, KernelPtConfig, A>::item_slot_in_regions(item, *regions));
                #[verus_spec(with Tracked(&mut cursor_owner), Tracked(entry_owner), Tracked(regions), Tracked(guards))]
                let _ = cursor.map(item);

                // Post-map: maintain the VA tracking invariant.
                proof {
                    KernelPtConfig::item_into_raw_spec_untracked(pa, level, prop);
                    let level_raw = KernelPtConfig::item_into_raw_spec(item).1;

                    let split_self = old_cursor_model.split_while_huge(
                        page_size(level_raw));

                    CursorView::<KernelPtConfig>::lemma_split_while_huge_preserves_cur_va(
                        old_cursor_model, page_size(level_raw));

                    let aligned_nat = vstd_extra::arithmetic::nat_align_up(
                        old_cursor_owner_va.to_vaddr() as nat,
                        page_size(level_raw) as nat,
                    );

                    lemma_page_size_ge_page_size(level_raw);

                    vstd_extra::arithmetic::lemma_nat_align_up_sound(
                        old_cursor_owner_va.to_vaddr() as nat,
                        page_size(level_raw) as nat,
                    );

                    old_cursor_owner_va.align_diff(level_raw as int);

                    sum_page_sizes_extend_right(it.elements, 0, pos@);

                    let pa_next_nat = pa_range.start as nat + sum_page_sizes_spec(it.elements, 0, pos@ + 1);
                    assert(pa_next_nat == pa as nat + page_size(level) as nat);

                    if pos@ + 1 < it.elements.len() as int {
                        let next_level = it.elements[pos@ + 1].1;
                        sum_page_sizes_extend_right(it.elements, 0, pos@ + 1);
                        sum_page_sizes_mono(it.elements, 0, pos@ + 2, it.elements.len() as int);
                        lemma_page_size_ge_page_size(next_level);
                    }
                }
            }
        }

        Self { range }
    }
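
    // Hypothetical usage sketch for an MMIO mapping (addresses are illustrative;
    // ghost arguments elided as in the sketches above):
    //
    //     let mmio = 0xfee0_0000..0xfee0_1000; // one untracked physical page
    //     let area = unsafe {
    //         #[verus_spec(with Tracked(owner), Tracked(&mut regions),
    //                 Tracked(&mut guards), Tracked(guard_perm))]
    //         KVirtArea::map_untracked_frames::<MyAtomicGuard>(PAGE_SIZE, 0, mmio, prop)
    //     };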
}

/*impl Drop for KVirtArea {
    fn drop(&mut self) {
        // 1. unmap all mapped pages.
        let page_table = KERNEL_PAGE_TABLE.unwrap();
        let range = self.start()..self.end();
        //let preempt_guard = disable_preempt();
        let mut cursor = page_table.cursor_mut(&range).unwrap();
        loop {
            // SAFETY: The range is under `KVirtArea` so it is safe to unmap.
            let Some(frag) = (unsafe { cursor.take_next(self.end() - cursor.virt_addr()) }) else {
                break;
            };
            drop(frag);
        }
        // 2. free the virtual block
        //KVIRT_AREA_ALLOCATOR.free(range);
    }
}*/
} // verus!