ostd/mm/kspace/kvirt_area.rs

// SPDX-License-Identifier: MPL-2.0
//! Kernel virtual memory allocation
use vstd::prelude::*;

use vstd_extra::arithmetic::nat_align_down;
use vstd_extra::ownership::{InvView, ModelOf, OwnerOf};
use vstd_extra::prelude::Inv;

use core::marker::PhantomData;
use core::ops::Range;

use super::{KERNEL_PAGE_TABLE, VMALLOC_VADDR_RANGE, KERNEL_BASE_VADDR, KERNEL_END_VADDR};
use crate::{
    mm::{
        frame::{untyped::AnyUFrameMeta, Frame, Segment},
        kspace::{KernelPtConfig, MappedItem},
        largest_pages,
        page_prop::PageProperty,
        Paddr, Vaddr, PAGE_SIZE,
        page_table::{
            is_valid_range_spec, page_size,
            Child, CursorMut, PageTable, PageTableConfig,
        },
    },
};

use crate::specs::arch::mm::{MAX_PADDR, PAGE_SIZE as SPEC_PAGE_SIZE, NR_LEVELS};
use crate::mm::PagingConstsTrait;
use crate::specs::arch::PagingConsts;
use crate::mm::nr_subpage_per_huge;
use crate::specs::task::InAtomicMode;
use crate::specs::mm::page_table::cursor::{CursorOwner, CursorView};
use crate::specs::mm::page_table::*;
use crate::specs::mm::frame::meta_region_owners::MetaRegionOwners;
use crate::mm::page_table::PageTableGuard;
use crate::mm::frame::DynFrame;
use crate::mm::kspace::AnyFrameMeta;
use crate::specs::mm::frame::mapping::frame_to_index_spec;
verus! {

/// Spec representation of `Frame<T>` as `DynFrame` (used when the actual conversion is opaque).
pub open spec fn frame_as_dynframe<T: AnyFrameMeta>(frame: Frame<T>) -> DynFrame {
    DynFrame { ptr: frame.ptr, _marker: PhantomData }
}

/// Converts `Frame<T>` to `DynFrame`, with a spec postcondition connecting the result
/// to the spec function `frame_as_dynframe`. The `Into` impl uses `transmute`, which is
/// `external_body`, so the equality is admitted.
/// TODO: use the conversions in `frame/mod.rs`
fn frame_into_dynframe<T: AnyUFrameMeta>(frame: Frame<T>) -> (res: DynFrame)
    ensures
        res == frame_as_dynframe(frame),
{
    proof { admit(); }
    frame.into()
}

/// Spec function: the entry owner correctly matches the frame and property for mapping.
pub open spec fn frame_entry_wf<T: AnyFrameMeta>(
    frame: Frame<T>,
    prop: PageProperty,
    entry_owner: EntryOwner<KernelPtConfig>,
) -> bool {
    let frame_mss = DynFrame { ptr: frame.ptr, _marker: PhantomData };
    let item = MappedItem::Tracked(frame_mss, prop);
    let (pa, level, prop_from_item) = KernelPtConfig::item_into_raw_spec(item);
    Child::Frame(pa, level, prop_from_item).wf(entry_owner)
}
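
// `map_frames` below requires `frame_entry_wf` pointwise for each frame and
// re-asserts it at the top of its mapping loop before handing the entry owner
// to `CursorMut::map`; it is the per-entry obligation for tracked mappings.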

#[derive(Debug)]
pub struct RangeAllocError;

pub struct RangeAllocator;

impl RangeAllocator {
    #[verifier::external_body]
    pub const fn new(_r: core::ops::Range<Vaddr>) -> Self {
        Self
    }

    #[verifier::external_body]
    pub fn alloc(&self, size: usize) -> (r: Result<core::ops::Range<Vaddr>, RangeAllocError>)
        ensures
            r == kvirt_alloc_spec(size),
    {
        unimplemented!()
    }
}

#[verifier::external_body]
pub fn disable_preempt<'a, G: InAtomicMode + 'a>() -> &'a G {
    unimplemented!()
}

exec static KVIRT_AREA_ALLOCATOR: RangeAllocator = RangeAllocator::new(VMALLOC_VADDR_RANGE);

/// Total size (in bytes) of the pages `elems[from..to]`.
pub open spec fn sum_page_sizes_spec(elems: Seq<(Paddr, u8)>, from: int, to: int) -> nat
    decreases (to - from) when from <= to
{
    if from >= to {
        0nat
    } else {
        page_size(elems[from].1) as nat
            + sum_page_sizes_spec(elems, from + 1, to)
    }
}
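
// For example, with two base pages `elems == seq![(0x0, 1u8), (0x1000, 1u8)]`
// (assuming `page_size(1) == PAGE_SIZE` for level-1 pages), the definition
// unfolds as
//     sum_page_sizes_spec(elems, 0, 2)
//         == page_size(1) + sum_page_sizes_spec(elems, 1, 2)
//         == page_size(1) + page_size(1) + 0
//         == 2 * PAGE_SIZE,
// i.e., the byte length covered by `elems[0..2]`.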

/// `sum(from, to+1) == sum(from, to) + page_size(elems[to])`.
proof fn sum_page_sizes_extend_right(elems: Seq<(Paddr, u8)>, from: int, to: int)
    requires
        0 <= from <= to,
        to < elems.len() as int,
    ensures
        sum_page_sizes_spec(elems, from, to + 1)
            == sum_page_sizes_spec(elems, from, to)
                + page_size(elems[to].1) as nat,
    decreases to - from,
{
    if from < to {
        sum_page_sizes_extend_right(elems, from + 1, to);
        // Help Verus: unfold sum_page_sizes_spec(elems, from, to) and (from, to+1).
        assert(sum_page_sizes_spec(elems, from, to) ==
            page_size(elems[from].1) as nat
                + sum_page_sizes_spec(elems, from + 1, to));
        assert(sum_page_sizes_spec(elems, from, to + 1) ==
            page_size(elems[from].1) as nat
                + sum_page_sizes_spec(elems, from + 1, to + 1));
    } else {
        // from == to; explicitly unfold both sides.
        assert(sum_page_sizes_spec(elems, from, to) == 0nat);
        assert(sum_page_sizes_spec(elems, from, to + 1) ==
            page_size(elems[from].1) as nat
                + sum_page_sizes_spec(elems, from + 1, to + 1));
        assert(sum_page_sizes_spec(elems, from + 1, to + 1) == 0nat);
    }
}

/// `sum(from, to1) <= sum(from, to2)` when `to1 <= to2`.
proof fn sum_page_sizes_mono(elems: Seq<(Paddr, u8)>, from: int, to1: int, to2: int)
    requires
        0 <= from <= to1 <= to2,
        to2 <= elems.len() as int,
    ensures
        sum_page_sizes_spec(elems, from, to1) <= sum_page_sizes_spec(elems, from, to2),
    decreases to2 - to1,
{
    if to1 < to2 {
        sum_page_sizes_extend_right(elems, from, to2 - 1);
        sum_page_sizes_mono(elems, from, to1, to2 - 1);
    }
}

/// Collects the output of `largest_pages` into a `Vec` so that Verus can iterate
/// over it with loop invariants (`impl Iterator` does not implement `ForLoopGhostIteratorNew`).
///
/// The postconditions reflect provable properties of every `(pa, level)` pair emitted by
/// `largest_pages`:
/// - PA alignment and level-validity follow from the loop guard in `largest_pages`.
/// - The sum of page sizes equals `len` (the iterator covers exactly `[pa, pa + len)`).
/// - At each step, the running VA is aligned to the current page size.
#[verifier::external_body]
fn collect_largest_pages(
    va: Vaddr,
    pa: Paddr,
    len: usize,
) -> (res: alloc::vec::Vec<(Paddr, u8)>)
    ensures
        forall |i: int| 0 <= i < res@.len() ==> (#[trigger] res@[i]).0 % PAGE_SIZE == 0,
        forall |i: int| 0 <= i < res@.len() ==> 1 <= (#[trigger] res@[i]).1 <= NR_LEVELS,
        forall |i: int| 0 <= i < res@.len() ==>
            (#[trigger] res@[i]).1 <= KernelPtConfig::HIGHEST_TRANSLATION_LEVEL(),
        sum_page_sizes_spec(res@, 0, res@.len() as int) == len as nat,
        forall |i: int| 0 <= i < res@.len() ==>
            (va as nat + #[trigger] sum_page_sizes_spec(res@, 0, i))
                % page_size(res@[i].1) as nat == 0,
        // PA tracking: each element's physical address equals pa + sum of preceding page sizes.
        forall |i: int| 0 <= i < res@.len() ==>
            (#[trigger] res@[i]).0 as nat == pa as nat + sum_page_sizes_spec(res@, 0, i),
{
    largest_pages::<KernelPtConfig>(va, pa, len).collect()
}
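
// As a concrete sketch (assuming x86-64 `PagingConsts`, i.e. 4 KiB level-1
// pages and 2 MiB level-2 pages): for 2 MiB-aligned `va` and `pa` and
// `len == 0x201000` (2 MiB + 4 KiB), `largest_pages` would emit
// `[(pa, 2), (pa + 0x200000, 1)]`, so the postconditions above read
//     page_size(2) + page_size(1) == 0x200000 + 0x1000 == len,
// with each element's PA offset by the sizes of the pages before it.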

#[verifier::external_body]
pub(crate) fn get_kernel_page_table() -> (r: &'static PageTable<KernelPtConfig>)
    ensures
        true,
{
    KERNEL_PAGE_TABLE.get().unwrap()
}

// Axiomatized spec for `alloc` - we cannot read an exec static in proof mode.
pub uninterp spec fn kvirt_alloc_spec(size: usize) -> Result<core::ops::Range<Vaddr>, RangeAllocError>;

pub axiom fn kvirt_alloc_succeeds(area_size: usize)
    requires
        0 < area_size <= usize::MAX / 2,
    ensures
        kvirt_alloc_spec(area_size).is_ok();

// The `map_offset <= area_size` precondition keeps the offset-related bounds
// consistent (an unconstrained `map_offset` would make the axiom unsound);
// both call sites below satisfy it via `map_offset < area_size`.
pub axiom fn kvirt_alloc_range_bounds(area_size: usize, map_offset: usize, r: core::ops::Range<Vaddr>)
    requires
        map_offset <= area_size,
    ensures
        kvirt_alloc_spec(area_size) == Ok::<core::ops::Range<Vaddr>, RangeAllocError>(r)
        ==> r.start <= r.end
            && (r.end - r.start) >= area_size
            && map_offset <= r.end - r.start
            && r.start + map_offset <= usize::MAX
            && r.start % PAGE_SIZE == 0
            && r.end % PAGE_SIZE == 0
            && KERNEL_BASE_VADDR <= r.start
            && r.end <= KERNEL_END_VADDR
;

/// Kernel ranges within [KERNEL_BASE_VADDR, KERNEL_END_VADDR] that are non-empty
/// and page-aligned are valid for `KernelPtConfig` (which uses sign-extended
/// high-half addresses).
pub axiom fn axiom_kernel_range_valid(r: core::ops::Range<Vaddr>)
    ensures
        (KERNEL_BASE_VADDR <= r.start
            && r.end <= KERNEL_END_VADDR
            && r.start < r.end
            && r.start % PAGE_SIZE == 0
            && r.end % PAGE_SIZE == 0)
        ==> is_valid_range_spec::<KernelPtConfig>(&r);

/// Kernel virtual area.
///
/// A kernel virtual area manages a range of memory in [`VMALLOC_VADDR_RANGE`].
/// It can map a portion or the entirety of its virtual memory pages to
/// physical memory, whether tracked with metadata or not.
///
/// It is the caller's responsibility to ensure TLB coherence before using the
/// mapped virtual address on a certain CPU.
//
// FIXME: This caller-ensured design is very error-prone. A good option is to
// use a guard that pins the CPU and ensures TLB coherence while accessing the
// `KVirtArea`. However, `IoMem` needs some non-trivial refactoring to support
// being implemented on a `!Send` and `!Sync` guard.
#[derive(Debug)]
pub struct KVirtArea {
    pub range: Range<Vaddr>,
}

pub tracked struct KVirtAreaOwner {
    pt_owner: PageTableOwner<KernelPtConfig>,
}

impl KVirtAreaOwner {
    /// The [`CursorView`] at the page containing the given address, representing the kernel page
    /// table's abstract state for query purposes.
    pub closed spec fn cursor_view_at(self, addr: Vaddr) -> CursorView<KernelPtConfig> {
        CursorView {
            cur_va: nat_align_down(addr as nat, SPEC_PAGE_SIZE as nat) as Vaddr,
            mappings: self.pt_owner.view_rec(self.pt_owner.0.value.path),
            phantom: PhantomData,
        }
    }
}
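
// Since `nat_align_down` rounds the address down to its containing page
// boundary, `cursor_view_at(va)`, `cursor_view_at(va + 1)`, and
// `cursor_view_at(va + PAGE_SIZE - 1)` all denote the same view for a
// page-aligned `va`; `KVirtArea::query` below relies on this page granularity.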

impl Inv for KVirtArea {
    open spec fn inv(self) -> bool {
        &&& KERNEL_BASE_VADDR <= self.range.start
        &&& self.range.end <= KERNEL_END_VADDR
    }
}

#[verus_verify]
impl KVirtArea {

    pub fn start(&self) -> Vaddr {
        self.range.start
    }

    pub fn end(&self) -> Vaddr {
        self.range.end
    }

    pub fn range(&self) -> Range<Vaddr> {
        self.range.start..self.range.end
    }

    #[cfg(ktest)]
    pub fn len(&self) -> usize {
        self.range.len()
    }

    /// Whether there is a mapped item at the page containing the address.
    pub open spec fn query_some_condition(self, owner: KVirtAreaOwner, addr: Vaddr) -> bool {
        owner.cursor_view_at(addr).present()
    }

    /// Postcondition when a mapping exists at the page.
    ///
    /// The returned item corresponds to the abstract mapping given by
    /// [`query_item_spec`](CursorView::query_item_spec).
    pub open spec fn query_some_ensures(
        self,
        owner: KVirtAreaOwner,
        addr: Vaddr,
        r: Option<super::MappedItem>,
    ) -> bool {
        r is Some && owner.cursor_view_at(addr).query_item_spec(r.unwrap()) is Some
    }

    /// Postcondition when no mapping exists at the page.
    pub open spec fn query_none_ensures(r: Option<super::MappedItem>) -> bool {
        r is None
    }

    /// Queries the mapping at the given virtual address.
    ///
    /// Returns the mapped item at the page containing `addr`, or `None` if there is no mapping.
    ///
    /// ## Preconditions
    /// - The kernel virtual area invariant holds ([`inv`](Inv::inv)).
    /// - The address is within the virtual area's range.
    ///
    /// ## Postconditions
    /// - If there is a mapped item at the page containing the address
    ///   ([`query_some_condition`]), it is returned ([`query_some_ensures`]).
    /// - If there is no mapping at that page, `None` is returned ([`query_none_ensures`]).
    #[verifier::external_body]
    #[verus_spec(r =>
        with Tracked(owner): Tracked<KVirtAreaOwner>,
        requires
            self.inv(),
            self.range.start <= addr && addr < self.range.end,
        ensures
            self.query_some_condition(owner, addr) ==> self.query_some_ensures(owner, addr, r),
            !self.query_some_condition(owner, addr) ==> Self::query_none_ensures(r),
    )]
    pub fn query<A: InAtomicMode>(&self, addr: Vaddr) -> Option<super::MappedItem>
    {
        use align_ext::AlignExt;

        let start = addr.align_down(PAGE_SIZE);
        let vaddr = start..start + PAGE_SIZE;
        let page_table = get_kernel_page_table();

        let preempt_guard = disable_preempt::<A>();
        let (mut cursor, _cursor_owner) = page_table.cursor(preempt_guard, &vaddr).unwrap();
        cursor.query().unwrap().1
    }
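
    // A minimal usage sketch (hypothetical caller; `A` must implement
    // `InAtomicMode` and `owner` must be the tracked `KVirtAreaOwner` for the
    // kernel page table):
    //
    //     #[verus_spec(with Tracked(owner))]
    //     let item = kva.query::<A>(kva.start());
    //     // `item is Some` exactly when `query_some_condition` holds at
    //     // `kva.start()`, i.e. the containing page is mapped.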

    /// Creates a kernel virtual area and maps tracked pages into it.
    ///
    /// The created virtual area will have a size of `area_size`, and the pages
    /// will be mapped starting from `map_offset` in the area.
    ///
    /// # Panics
    ///
    /// This function panics if
    ///  - the area size is not a multiple of [`PAGE_SIZE`];
    ///  - the map offset is not aligned to [`PAGE_SIZE`];
    ///  - the map offset plus the size of the pages exceeds the area size.
    // TODO: T should be any AnyFrameMeta + Repr<MetaSlotStorage>
    #[verus_spec(
        with Tracked(owner): Tracked<KVirtAreaOwner>,
            Tracked(entry_owners): Tracked<&mut Seq<EntryOwner<KernelPtConfig>>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'a, KernelPtConfig>>,
            Tracked(guard_perm): Tracked<GuardPerm<'a, KernelPtConfig>>
    )]
    pub fn map_frames<'a, T: AnyUFrameMeta, A: InAtomicMode + 'a>(
        area_size: usize,
        map_offset: usize,
        frames: alloc::vec::Vec<Frame<T>>,
        prop: PageProperty,
    ) -> Self
        requires
            0 < area_size,
            map_offset < area_size,
            map_offset % PAGE_SIZE == 0,
            area_size <= usize::MAX / 2,
            old(entry_owners).len() == frames.len(),
            frames.len() as int * PAGE_SIZE as int + map_offset as int <= area_size as int,
            forall |i: int| 0 <= i < old(entry_owners).len() ==> (#[trigger] old(entry_owners)[i]).inv(),
            forall |i: int| 0 <= i < frames.len() ==>
                frame_entry_wf(frames[i], prop, #[trigger] old(entry_owners)[i]),
            forall |i: int| 0 <= i < frames.len() ==>
                CursorMut::<'a, KernelPtConfig, A>::item_not_mapped(
                    MappedItem::Tracked(frame_as_dynframe(#[trigger] frames[i]), prop),
                    *old(regions),
                ),
            // Frames have distinct physical addresses (follows from linearity of slot_perm ownership).
            forall |i: int, j: int| 0 <= i < j < frames.len() ==>
                (#[trigger] old(entry_owners)[i]).frame.unwrap().mapped_pa
                    != (#[trigger] old(entry_owners)[j]).frame.unwrap().mapped_pa,
    {
        proof {
            kvirt_alloc_succeeds(area_size);
        }

        let range = KVIRT_AREA_ALLOCATOR.alloc(area_size).unwrap();

        proof {
            kvirt_alloc_range_bounds(area_size, map_offset, range);
        }

        let cursor_range = range.start + map_offset..range.end;

        proof {
            axiom_kernel_range_valid(cursor_range);
        }

        let page_table = get_kernel_page_table();
        let preempt_guard = disable_preempt::<A>();

        let (mut cursor, Tracked(cursor_owner)) =
        (#[verus_spec(with Tracked(owner.pt_owner), Tracked(guard_perm), Tracked(regions), Tracked(guards))]
            page_table.cursor_mut(preempt_guard, &cursor_range)).unwrap();

        let ghost init_frames_len = frames.len();

        assert forall |j: int| #![trigger entry_owners[j]]
            0 <= j < frames@.len()
            implies regions.slot_owners[frame_to_index_spec(entry_owners[j].frame.unwrap().mapped_pa)].path_if_in_pt is None
        by {
            let item = MappedItem::Tracked(frame_as_dynframe(frames@[j]), prop);
            KernelPtConfig::item_into_raw_spec_tracked_level(item);
            let (pa, level, _) = KernelPtConfig::item_into_raw_spec(item);
            assert(level == 1u8);
            vstd::arithmetic::mul::lemma_mul_by_zero_is_zero(
                nr_subpage_per_huge::<PagingConsts>().ilog2() as int);
            vstd::arithmetic::power::lemma_pow0(2int);
        };

        for frame in it: frames.into_iter()
            invariant
                cursor.inner.invariants(cursor_owner, *regions, *guards),
                forall |entry: EntryOwner<KernelPtConfig>| entry_owners.contains(entry) ==>
                    cursor.map_cursor_requires(cursor_owner, *guards),
                forall |i: int| 0 <= i < entry_owners.len() ==> (#[trigger] entry_owners[i]).inv(),
                cursor.inner.va % PAGE_SIZE == 0,
                cursor.inner.va as int + entry_owners.len() as int * PAGE_SIZE as int
                    <= cursor.inner.barrier_va.end as int,
                cursor.inner.barrier_va.end % PAGE_SIZE == 0,
                it.elements.len() == init_frames_len,
                init_frames_len <= old(entry_owners).len(),
                entry_owners.len() == old(entry_owners).len() - it.pos,
                forall |j: int|
                    0 <= j < entry_owners.len() && it.pos + j < it.elements.len() ==>
                    frame_entry_wf(it.elements[it.pos + j], prop, entry_owners[j]),
                // item_not_mapped holds in the current regions for all remaining frames.
                forall |j: int| #![trigger entry_owners[j]]
                    0 <= j < it.elements.len() - it.pos ==>
                    regions.slot_owners[frame_to_index_spec(entry_owners[j].frame.unwrap().mapped_pa)].path_if_in_pt is None,
                // Remaining frames have distinct physical addresses.
                forall |i: int, j: int| 0 <= i < j < it.elements.len() - it.pos ==>
                    (#[trigger] entry_owners[i]).frame.unwrap().mapped_pa != (#[trigger] entry_owners[j]).frame.unwrap().mapped_pa,
        {
            proof {
                assert(entry_owners.contains(entry_owners[0]));
                assert(cursor.map_cursor_requires(cursor_owner, *guards));
                assert(frame_entry_wf(frame, prop, entry_owners[0]));
            }

            let ghost cur_mapped_pa: usize = entry_owners[0].frame.unwrap().mapped_pa;
            let ghost cur_pa_from_wf: usize = KernelPtConfig::item_into_raw_spec(
                MappedItem::Tracked(frame_as_dynframe(it.elements.index(it.pos as int)), prop)).0;
            let ghost pre_remove_owners: Seq<EntryOwner<KernelPtConfig>> = *entry_owners;

            let tracked mut entry_owner = entry_owners.tracked_remove(0);

            let dynframe = frame_into_dynframe(frame);
            // Now Verus knows: dynframe == frame_as_dynframe(it.elements[it.pos]).
            let item = MappedItem::Tracked(dynframe, prop);
            assert(cursor.inner.invariants(cursor_owner, *regions, *guards));

            let ghost regions_before_map = *regions;
            let ghost old_cursor_model: CursorView<KernelPtConfig> = cursor.inner.model(cursor_owner);
            let ghost old_cursor_owner_va = cursor_owner.va;
            proof {
                cursor_owner.view_preserves_inv(); // old_cursor_model.inv()
                cursor_owner.va.reflect_prop(cursor.inner.va);
                let (pa, level, prop_from_item) = KernelPtConfig::item_into_raw_spec(item);
                KernelPtConfig::item_into_raw_spec_level_bounds(item);
                KernelPtConfig::item_into_raw_spec_tracked_level(item);
                lemma_va_align_page_size_level_1(cursor.inner.va);
                cursor_owner.locked_range_page_aligned();
                let ghost diff: int = cursor.inner.barrier_va.end as int - cursor.inner.va as int;
                vstd::arithmetic::mul::lemma_mul_by_zero_is_zero(
                    nr_subpage_per_huge::<PagingConsts>().ilog2() as int,
                );
                vstd::arithmetic::power::lemma_pow0(2int);
            }

            // SAFETY: The constructor of the `KVirtArea` has already ensured
            // that this mapping does not affect the kernel's memory safety.
            assert(CursorMut::<'a, KernelPtConfig, A>::item_slot_in_regions(
                item, *regions,
            )) by { admit() };
            #[verus_spec(with Tracked(&mut cursor_owner), Tracked(entry_owner), Tracked(regions), Tracked(guards))]
            let res = cursor.map(item);

            proof {
                pre_remove_owners.remove_ensures(0);
                let cur_idx = frame_to_index_spec(cur_mapped_pa);

                let (pa, level, prop_) = KernelPtConfig::item_into_raw_spec(item);
                KernelPtConfig::item_into_raw_spec_tracked_level(item);

                let split_self = old_cursor_model.split_while_huge(PAGE_SIZE);

                let aligned_nat: nat = vstd_extra::arithmetic::nat_align_up(
                    split_self.cur_va as nat, PAGE_SIZE as nat);

                vstd_extra::arithmetic::lemma_nat_align_up_sound(
                    split_self.cur_va as nat, PAGE_SIZE as nat);

                CursorView::<KernelPtConfig>::lemma_split_while_huge_preserves_cur_va(
                    old_cursor_model, PAGE_SIZE);

                assert(aligned_nat == vstd_extra::arithmetic::nat_align_up(
                    old_cursor_owner_va.to_vaddr() as nat, PAGE_SIZE as nat));
                old_cursor_owner_va.align_diff(1);

                assert(vstd_extra::arithmetic::nat_align_down(
                    old_cursor_owner_va.to_vaddr() as nat, PAGE_SIZE as nat)
                    == old_cursor_owner_va.to_vaddr() as nat);

                cursor_owner.va.reflect_prop(cursor.inner.va);
            }
        }

        Self { range }
    }
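
    // A usage sketch for `map_frames` (hypothetical names; the tracked
    // arguments must satisfy the `requires` clause above, e.g. one
    // invariant-satisfying `EntryOwner` per frame and pairwise-distinct
    // physical addresses):
    //
    //     #[verus_spec(with Tracked(owner), Tracked(&mut entry_owners),
    //         Tracked(&mut regions), Tracked(&mut guards), Tracked(guard_perm))]
    //     let kva = KVirtArea::map_frames::<MyMeta, A>(
    //         4 * PAGE_SIZE, // area_size: a multiple of PAGE_SIZE
    //         0,             // map_offset: map from the start of the area
    //         frames,        // Vec<Frame<MyMeta>>, at most 4 frames here
    //         prop,
    //     );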

    /// Creates a kernel virtual area and maps untracked frames into it.
    ///
    /// The created virtual area will have a size of `area_size`, and the
    /// physical addresses will be mapped starting from `map_offset` in
    /// the area.
    ///
    /// You can provide a `0..0` physical range to create a virtual area without
    /// mapping any physical memory.
    ///
    /// # Panics
    ///
    /// This function panics if
    ///  - the area size is not a multiple of [`PAGE_SIZE`];
    ///  - the map offset is not aligned to [`PAGE_SIZE`];
    ///  - the provided physical range is not aligned to [`PAGE_SIZE`];
    ///  - the map offset plus the length of the physical range exceeds the
    ///    area size;
    ///  - the provided physical range contains tracked physical addresses.
    #[verus_spec(
        with Tracked(owner): Tracked<KVirtAreaOwner>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(guards): Tracked<&mut Guards<'a, KernelPtConfig>>,
            Tracked(guard_perm): Tracked<GuardPerm<'a, KernelPtConfig>>
    )]
    pub unsafe fn map_untracked_frames<'a, A: InAtomicMode + 'a>(
        area_size: usize,
        map_offset: usize,
        pa_range: Range<Paddr>,
        prop: PageProperty,
    ) -> Self
        requires
            0 < area_size,
            area_size <= usize::MAX / 2,
            map_offset % PAGE_SIZE == 0,
            map_offset < area_size,
            // The PA range must fit within [map_offset, area_size) of the virtual area.
            pa_range.end as nat - pa_range.start as nat <= area_size as nat - map_offset as nat,
            // Physical addresses must be within physical memory bounds.
            pa_range.end <= MAX_PADDR,
            // Physical range endpoints must be PAGE_SIZE-aligned (the function panics otherwise).
            pa_range.start % PAGE_SIZE == 0,
            pa_range.end % PAGE_SIZE == 0,
            // The physical range must not already be mapped in any page table.
            old(regions).paddr_range_not_mapped(pa_range),
    {
        proof {
            kvirt_alloc_succeeds(area_size);
        }

        let range = KVIRT_AREA_ALLOCATOR.alloc(area_size).unwrap();

        proof {
            kvirt_alloc_range_bounds(area_size, map_offset, range);
        }

        if pa_range.start < pa_range.end {
            let len = pa_range.end - pa_range.start;
            let va_range = range.start + map_offset..range.start + map_offset + len;

            proof {
                axiom_kernel_range_valid(va_range);
//                assert(crate::mm::page_table::Cursor::<KernelPtConfig, A>::cursor_new_success_conditions(&va_range));
            }

            let page_table = get_kernel_page_table();
            let preempt_guard = disable_preempt::<A>();

            // Save the regions state before cursor_mut so the postcondition trigger can fire.
            let ghost pre_cursor_regions: MetaRegionOwners = *regions;

            let (mut cursor, Tracked(cursor_owner)) =
            (#[verus_spec(with Tracked(owner.pt_owner), Tracked(guard_perm), Tracked(regions), Tracked(guards))]
                page_table.cursor_mut(preempt_guard, &va_range)).unwrap();

            let pages = collect_largest_pages(va_range.start, pa_range.start, len);

            for (pa, level) in it: pages.into_iter()
                invariant
                    cursor.inner.invariants(cursor_owner, *regions, *guards),
                    forall |i: int| 0 <= i < it.elements.len() ==> (#[trigger] it.elements[i]).0 % PAGE_SIZE == 0,
                    forall |i: int| 0 <= i < it.elements.len() ==>
                        1 <= (#[trigger] it.elements[i]).1 <= NR_LEVELS,
                    forall |i: int| 0 <= i < it.elements.len() ==>
                        (#[trigger] it.elements[i]).1 <= KernelPtConfig::HIGHEST_TRANSLATION_LEVEL(),
                    sum_page_sizes_spec(it.elements, 0, it.elements.len() as int) == len as nat,
                    forall |i: int| 0 <= i < it.elements.len() ==>
                        (va_range.start as nat + #[trigger] sum_page_sizes_spec(it.elements, 0, i))
                            % page_size(it.elements[i].1) as nat == 0,
                    // VA tracking invariant: the cursor has advanced past the sum of processed pages.
                    cursor.inner.va as nat
                        == va_range.start as nat
                            + sum_page_sizes_spec(it.elements, 0, it.pos as int),
                    cursor.inner.barrier_va.end == va_range.start + len,
                    it.pos <= it.elements.len(),
                    // map_cursor_requires holds while there are remaining elements.
                    it.pos < it.elements.len() ==>
                        cursor.map_cursor_requires(cursor_owner, *guards),
                    // PA tracking: element[i].0 == pa_range.start + sum of preceding sizes.
                    forall |i: int| #![auto] 0 <= i < it.elements.len() ==>
                        it.elements[i].0 as nat
                            == pa_range.start as nat + sum_page_sizes_spec(it.elements, 0, i),
                    // The remaining PA range is not mapped.
                    regions.paddr_range_not_mapped(
                        (pa_range.start as nat
                            + sum_page_sizes_spec(it.elements, 0, it.pos as int)) as usize
                        ..pa_range.end
                    ),
                    // pa_range.end == pa_range.start + len in nat (constant throughout the loop).
                    pa_range.end as nat == pa_range.start as nat + len as nat,
                    // guard_level == NR_LEVELS (from the cursor_mut postcondition, preserved by map).
                    cursor.inner.guard_level == NR_LEVELS as u8,
                    // Physical address bound (function precondition; constant throughout the loop).
                    pa_range.end <= MAX_PADDR,
            {
                let pos: Ghost<int> = Ghost(it.pos as int);

                proof {
                    sum_page_sizes_extend_right(it.elements, 0, pos@);
                    sum_page_sizes_mono(it.elements, 0, pos@ + 1, it.elements.len() as int);
                }

                let item = MappedItem::Untracked(pa, level, prop);
                proof_decl! {
                    let tracked entry_owner =
                        EntryOwner::<KernelPtConfig>::new_untracked_frame(pa, level, prop);
                }

                let ghost old_cursor_model: CursorView<KernelPtConfig> =
                    cursor.inner.model(cursor_owner);
                let ghost old_cursor_owner_va = cursor_owner.va;
                let ghost old_va: nat = cursor.inner.va as nat;

                proof {
                    cursor_owner.view_preserves_inv(); // old_cursor_model.inv()
                    cursor_owner.va.reflect_prop(cursor.inner.va);

                    KernelPtConfig::item_into_raw_spec_untracked(pa, level, prop);

                    // From the loop invariant: pa_range.end == pa_range.start + len (in nat).
                    sum_page_sizes_extend_right(it.elements, 0, pos@);
                    sum_page_sizes_mono(
                        it.elements, 0, pos@ + 1, it.elements.len() as int);
                }

                // Save a ghost copy of regions before map for post-map invariant maintenance.
                let ghost pre_map_regions: MetaRegionOwners = *regions;

                // SAFETY: The caller of `map_untracked_frames` has ensured the safety of this mapping.
                assert(CursorMut::<'a, KernelPtConfig, A>::item_slot_in_regions(
                    item, *regions,
                )) by { admit() };
                #[verus_spec(with Tracked(&mut cursor_owner), Tracked(entry_owner), Tracked(regions), Tracked(guards))]
                let _ = cursor.map(item);

                // Post-map: maintain the VA tracking invariant.
                proof {
                    KernelPtConfig::item_into_raw_spec_untracked(pa, level, prop);
                    let level_raw = KernelPtConfig::item_into_raw_spec(item).1;

                    let split_self = old_cursor_model.split_while_huge(
                        page_size(level_raw));

                    CursorView::<KernelPtConfig>::lemma_split_while_huge_preserves_cur_va(
                        old_cursor_model, page_size(level_raw));

                    let aligned_nat = vstd_extra::arithmetic::nat_align_up(
                        old_cursor_owner_va.to_vaddr() as nat,
                        page_size(level_raw) as nat,
                    );

                    lemma_page_size_ge_page_size(level_raw);

                    vstd_extra::arithmetic::lemma_nat_align_up_sound(
                        old_cursor_owner_va.to_vaddr() as nat,
                        page_size(level_raw) as nat,
                    );

                    old_cursor_owner_va.align_diff(level_raw as int);

                    sum_page_sizes_extend_right(it.elements, 0, pos@);

                    let pa_next_nat = pa_range.start as nat + sum_page_sizes_spec(it.elements, 0, pos@ + 1);
                    assert(pa_next_nat == pa as nat + page_size(level) as nat);

                    if pos@ + 1 < it.elements.len() as int {
                        let next_level = it.elements[pos@ + 1].1;
                        sum_page_sizes_extend_right(it.elements, 0, pos@ + 1);
                        sum_page_sizes_mono(it.elements, 0, pos@ + 2, it.elements.len() as int);
                        lemma_page_size_ge_page_size(next_level);
                    }
                }
            }
        }

        Self { range }
    }
}
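
// A usage sketch for `map_untracked_frames`, e.g. for MMIO (hypothetical
// names; the PA range must be page-aligned, below `MAX_PADDR`, and not
// already mapped, per `paddr_range_not_mapped`):
//
//     #[verus_spec(with Tracked(owner), Tracked(&mut regions),
//         Tracked(&mut guards), Tracked(guard_perm))]
//     let kva = unsafe {
//         KVirtArea::map_untracked_frames::<A>(
//             len, 0, mmio_start..mmio_start + len, prop)
//     };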

/*impl Drop for KVirtArea {
    fn drop(&mut self) {
        // 1. Unmap all mapped pages.
        let page_table = KERNEL_PAGE_TABLE.get().unwrap();
        let range = self.start()..self.end();
        //let preempt_guard = disable_preempt();
        let mut cursor = page_table.cursor_mut(&range).unwrap();
        loop {
            // SAFETY: The range is under `KVirtArea` so it is safe to unmap.
            let Some(frag) = (unsafe { cursor.take_next(self.end() - cursor.virt_addr()) }) else {
                break;
            };
            drop(frag);
        }
        // 2. Free the virtual block.
        //KVIRT_AREA_ALLOCATOR.free(range);
    }
}*/
} // verus!