// File: ostd/mm/frame/meta.rs
1// SPDX-License-Identifier: MPL-2.0
2//! Metadata management of frames.
3//!
4//! You can picture a globally shared, static, gigantic array of metadata
5//! initialized for each frame.
6//! Each entry in this array holds the metadata for a single frame.
7//! There would be a dedicated small
8//! "heap" space in each slot for dynamic metadata. You can store anything as
9//! the metadata of a frame as long as it's [`Sync`].
10//!
11//! # Implementation
12//!
13//! The slots are placed in the metadata pages mapped to a certain virtual
14//! address in the kernel space. So finding the metadata of a frame often
15//! comes with no costs since the translation is a simple arithmetic operation.
16use vstd::prelude::*;
17
18pub mod mapping;
19
20use self::mapping::{frame_to_index, frame_to_meta, meta_addr, meta_to_frame, META_SLOT_SIZE};
21use crate::mm::io::{Infallible, VmReader};
22use crate::specs::mm::frame::meta_owners::*;
23use crate::specs::mm::frame::meta_region_owners::MetaRegionOwners;
24
25use vstd::atomic::{PAtomicU64, PAtomicU8, PermissionU64};
26use vstd::cell::pcell_maybe_uninit;
27
28use vstd::simple_pptr::{self, PPtr};
29use vstd_extra::cast_ptr::*;
30use vstd_extra::ownership::*;
31
32use core::{
33    alloc::Layout,
34    any::Any,
35    cell::UnsafeCell,
36    fmt::Debug,
37    marker::PhantomData,
38    mem::{align_of, size_of, ManuallyDrop, MaybeUninit},
39    result::Result,
40    sync::atomic::{AtomicU64, AtomicU8, Ordering},
41};
42
43//use align_ext::AlignExt;
44//use log::info;
45
46use crate::{
47    //    boot::memory_region::MemoryRegionType,
48    //    const_assert,
49    mm::{
50        //        frame::allocator::{self, EarlyAllocatedFrameMeta},
51        paddr_to_vaddr,
52        //        page_table::boot_pt,
53        page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
54        /*Infallible,*/ Paddr,
55        PagingLevel,
56        //Segment,
57        Vaddr,
58        MAX_NR_PAGES,
59        MAX_PADDR,
60        /*VmReader,*/ PAGE_SIZE,
61    },
62    specs::arch::kspace::FRAME_METADATA_RANGE,
63    //    panic::abort,
64    //    util::ops::range_difference,
65};
66
67verus! {
68
/// The metadata slot of a single frame.
///
/// `#[repr(C)]` fixes the field order so that the `storage` field is
/// guaranteed to sit at offset 0 of the slot (see the field docs below).
#[repr(C)]
pub struct MetaSlot {
    /// The metadata of a frame.
    ///
    /// It is placed at the beginning of a slot because:
    ///  - the implementation can simply cast a `*const MetaSlot`
    ///    to a `*const AnyFrameMeta` for manipulation;
    ///  - if the metadata need special alignment, we can provide
    ///    at most `PAGE_METADATA_ALIGN` bytes of alignment;
    ///  - the subsequent fields can utilize the padding of the
    ///    reference count to save space.
    ///
    /// # Verification Design
    /// We model the metadata of the slot as a `MetaSlotStorage`, which is a tagged union of the different
    /// types of metadata defined in the development.
    pub storage: pcell_maybe_uninit::PCell<MetaSlotStorage>,
    /// The reference count of the page.
    ///
    /// Specifically, the reference count has the following meaning:
    ///  - `REF_COUNT_UNUSED`: The page is not in use.
    ///  - `REF_COUNT_UNIQUE`: The page is owned by a [`UniqueFrame`].
    ///  - `0`: The page is being constructed ([`Frame::from_unused`])
    ///    or destructured ([`drop_last_in_place`]).
    ///  - `1..REF_COUNT_MAX`: The page is in use.
    ///  - `REF_COUNT_MAX..REF_COUNT_UNIQUE`: Illegal values to
    ///    prevent the reference count from overflowing. Otherwise,
    ///    overflowing the reference count will cause soundness issue.
    pub ref_count: PAtomicU64,
    /// The virtual table that indicates the type of the metadata. Currently we do not verify this because
    /// of the dependency on the `dyn Trait` pattern. But we can revisit it now that `dyn Trait` is supported by Verus.
    pub vtable_ptr: PPtr<usize>,
    /// This is only accessed by [`crate::mm::frame::linked_list`].
    /// It stores 0 if the frame is not in any list, otherwise it stores the
    /// ID of the list.
    ///
    /// It is ugly but allows us to tell if a frame is in a specific list by
    /// one relaxed read. Otherwise, if we store it conditionally in `storage`
    /// we would have to ensure that the type is correct before the read, which
    /// costs a synchronization.
    pub in_list: PAtomicU64,
}
110
/// Sentinel reference count: the frame is not in use (see [`MetaSlot::ref_count`]).
pub const REF_COUNT_UNUSED: u64 = u64::MAX;

/// Sentinel reference count: the frame is exclusively owned by a [`UniqueFrame`].
pub const REF_COUNT_UNIQUE: u64 = u64::MAX - 1;

/// Exclusive upper bound of legal shared reference counts; values in
/// `REF_COUNT_MAX..REF_COUNT_UNIQUE` are illegal to guard against overflow.
pub const REF_COUNT_MAX: u64 = i64::MAX as u64;

/// The vtable metadata type for `dyn AnyFrameMeta` pointers.
// NOTE(review): currently unreferenced in the visible portion of this file;
// presumably kept for the unverified `dyn_meta_ptr` path — confirm before removing.
type FrameMetaVtablePtr = core::ptr::DynMetadata<dyn AnyFrameMeta>;
118
/// The error type for getting the frame from a physical address.
#[derive(Debug)]
pub enum GetFrameError {
    /// The frame is in use.
    InUse,
    /// The frame is not in use.
    Unused,
    /// The frame is being initialized or destructed.
    Busy,
    /// The frame is private to an owner of [`UniqueFrame`].
    ///
    /// [`UniqueFrame`]: super::unique::UniqueFrame
    Unique,
    /// The provided physical address is out of bound.
    OutOfBound,
    /// The provided physical address is not aligned.
    NotAligned,
    /// Verification only: `compare_exchange` returned `Err`, retry.
    /// Used by [`MetaSlot::get_from_in_use_loop`] to signal its caller to spin.
    Retry,
}
139
/// Spec-level counterpart of [`get_slot`]: the `PPtr<MetaSlot>` whose address
/// is the metadata address of the frame at `paddr`.
// NOTE(review): the alignment recommendation uses the literal `4096` while the
// exec path checks `paddr % PAGE_SIZE` — presumably equal; confirm PAGE_SIZE == 4096.
pub open spec fn get_slot_spec(paddr: Paddr) -> (res: PPtr<MetaSlot>)
    recommends
        paddr % 4096 == 0,
        paddr < MAX_PADDR,
{
    let slot = frame_to_meta(paddr);
    PPtr(slot, PhantomData::<MetaSlot>)
}
148
/// Space-holder of the AnyFrameMeta virtual table.
pub trait AnyFrameMeta: Repr<MetaSlotStorage> {
    /// Hook invoked when the frame is dropped; the default implementation does nothing.
    exec fn on_drop(&mut self, _reader: &mut VmReader<'_, Infallible>) {
    }

    /// Whether the frame is untyped; defaults to `false`.
    exec fn is_untyped(&self) -> bool {
        false
    }

    /// Ghost view of the vtable pointer associated with this metadata type.
    spec fn vtable_ptr(&self) -> usize;
}
160
// Pin the layout of `MetaSlot` so that slot-index arithmetic (META_SLOT_SIZE)
// stays in sync with the actual struct size/alignment.
global layout MetaSlot is size == 64, align == 8;

/// Broadcast axiom exposing the pinned `MetaSlot` layout (64-byte size,
/// 8-byte alignment) to proofs that mention `size_of`/`align_of`.
pub broadcast axiom fn size_of_meta_slot()
    ensures
        #![trigger size_of::<MetaSlot>()]
        #![trigger align_of::<MetaSlot>()]
        size_of::<MetaSlot>() == 64,
        align_of::<MetaSlot>() == 8;
169
/// Executable accessor for the (pinned) size of a [`MetaSlot`]; always `64`.
/// Usable in spec contexts via `#[verifier::allow_in_spec]`.
#[inline(always)]
#[verifier::allow_in_spec]
pub const fn meta_slot_size() -> (res: usize)
    returns
        64usize,
{
    size_of::<MetaSlot>()
}
178
/// Whether `paddr` names a frame with a valid metadata slot:
/// page-aligned and below `MAX_PADDR`. Mirrors the runtime checks in [`get_slot`].
pub open spec fn has_safe_slot(paddr: Paddr) -> bool {
    &&& paddr % PAGE_SIZE == 0
    &&& paddr < MAX_PADDR
}
183
184/// Gets the reference to a metadata slot.
185/// # Verified Properties
186/// ## Preconditions
187/// `paddr` is the physical address of a frame, with a valid owner.
188/// ## Postconditions
189/// If `paddr` is aligned properly and in-bounds, the function returns a pointer to its metadata slot.
190/// ## Safety
191/// Verus ensures that the pointer will only be used when we have a permission object, so creating it is safe.
192pub(super) fn get_slot(paddr: Paddr) -> (res: Result<PPtr<MetaSlot>, GetFrameError>)
193    ensures
194        has_safe_slot(paddr) <==> res is Ok,
195        res is Ok ==> res.unwrap().addr() == frame_to_meta(paddr),
196{
197    if paddr % PAGE_SIZE != 0 {
198        return Err(GetFrameError::NotAligned);
199    }
200    if paddr >= MAX_PADDR {
201        return Err(GetFrameError::OutOfBound);
202    }
203    let vaddr = frame_to_meta(paddr);
204    let ptr = PPtr::<MetaSlot>::from_addr(vaddr);
205
206    // SAFETY: `ptr` points to a valid `MetaSlot` that will never be
207    // mutably borrowed, so taking an immutable reference to it is safe.
208    Ok(ptr)
209}
210
211#[verus_verify]
212impl MetaSlot {
    /// This is the equivalent of &self as *const as Vaddr, but we need to axiomatize it.
    /// Returns `perm.addr()`, i.e. the (virtual) address the permission points to.
    // NOTE(review): the return type is `Paddr` although the value is a metadata-region
    // virtual address; callers convert via `meta_to_frame` — confirm the alias is intended.
    /// # Safety
    /// It is safe to take the address of a pointer, but it may not be safe to use that
    /// address for all purposes.
    #[verifier::external_body]
    fn addr_of(&self, Tracked(perm): Tracked<&vstd::simple_pptr::PointsTo<MetaSlot>>) -> Paddr
        requires
            self == perm.value(),
        returns
            perm.addr(),
    {
        // Body is trusted (`external_body`); the spec above is the contract.
        unimplemented!()
    }
226
    /// A helper function that casts a `MetaSlot` pointer to a `Metadata` pointer of type `M`.
    ///
    /// `addr` must be the address recorded in `perm` (enforced by the `requires`);
    /// the returned `ReprPtr` shares that same address.
    pub fn cast_slot<M: AnyFrameMeta + Repr<MetaSlotStorage>>(
        &self,
        addr: usize,
        Tracked(perm): Tracked<&vstd::simple_pptr::PointsTo<MetaSlot>>,
    ) -> (res: ReprPtr<MetaSlot, Metadata<M>>)
        requires
            perm.value() == self,
            addr == perm.addr(),
        ensures
            res.ptr.addr() == addr,
            res.addr() == addr,
    {
        // Pure re-typing: no memory is touched, only the pointer type changes.
        ReprPtr::<MetaSlot, Metadata<M>> { ptr: PPtr::from_addr(addr), _T: PhantomData }
    }
242
    /// A helper function that casts `MetaSlot` permission to a `Metadata` permission of type `M`.
    ///
    /// Ghost-only: packages the slot permission and the inner field permissions
    /// into a typed [`PointsTo`] without inspecting or changing either.
    pub proof fn cast_perm<M: AnyFrameMeta + Repr<MetaSlotStorage>>(
        tracked perm: vstd::simple_pptr::PointsTo<MetaSlot>,
        tracked inner_perms: MetadataInnerPerms,
    ) -> (tracked res: PointsTo<MetaSlot, Metadata<M>>)
        ensures
            res.points_to == perm,
            res.inner_perms == inner_perms,
    {
        PointsTo { points_to: perm, inner_perms, _T: PhantomData }
    }
254
    /// Initializes the metadata slot of a frame assuming it is unused.
    ///
    /// If successful, the function returns a pointer to the metadata slot.
    /// And the slot is initialized with the given metadata.
    ///
    /// The resulting reference count held by the returned pointer is
    /// [`REF_COUNT_UNIQUE`] if `as_unique_ptr` is `true`, otherwise `1`.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - **Safety Invariant**: Metaslot region invariants must hold.
    /// - **Bookkeeping**: The slot permissions must be available in order to check the reference count.
    /// This precondition is stronger than it needs to be; absent permissions correspond to error cases.
    /// ## Postconditions
    /// - **Safety Invariant**: Metaslot region invariants hold after the call.
    /// - **Safety**: Other slots are not affected by the call.
    /// - **Correctness**: If successful, the function returns a pointer to the metadata slot and a permission to the slot.
    /// - **Correctness**: If successful, the slot is initialized with the given metadata.
    /// ## Safety
    /// - This function returns an error if `paddr` does not correspond to a valid slot or the slot is in use.
    /// - Accesses to the slot itself are gated by atomic checks, avoiding data races.
    #[verus_spec(res =>
        with Tracked(regions): Tracked<&mut MetaRegionOwners>
        requires
            old(regions).inv(),
            old(regions).slots.contains_key(frame_to_index(paddr)),
        ensures
            final(regions).inv(),
            res matches Ok((res, perm)) ==> Self::get_from_unused_perm_spec(paddr, metadata, as_unique_ptr, res, perm@),
            res is Ok ==> Self::get_from_unused_spec(paddr, as_unique_ptr, *old(regions), *final(regions)),
            // If we can make the failure conditions exhaustive, we can add this as a liveness condition.
            !has_safe_slot(paddr) ==> res is Err,
    )]
    pub(super) fn get_from_unused<M: AnyFrameMeta + Repr<MetaSlotStorage> + OwnerOf>(
        paddr: Paddr,
        metadata: M,
        as_unique_ptr: bool,
    ) -> (res: Result<(PPtr<Self>, Tracked<PointsTo<MetaSlot, Metadata<M>>>), GetFrameError>) {
        // Fails fast with NotAligned/OutOfBound when the slot is invalid.
        let slot = get_slot(paddr)?;

        proof {
            if has_safe_slot(paddr) {
                regions.inv_implies_correct_addr(paddr);
            }
        }

        // Temporarily take this slot's owner, permission, and field permissions
        // out of the region map; they are reinserted on every exit path below.
        let tracked mut slot_own = regions.slot_owners.tracked_remove(frame_to_index(paddr));
        let tracked mut slot_perm = regions.slots.tracked_remove(frame_to_index(paddr));
        let tracked mut inner_perms = slot_own.take_inner_perms();

        // `Acquire` pairs with the `Release` in `drop_last_in_place` and ensures the metadata
        // initialization won't be reordered before this memory compare-and-exchange.
        // UNUSED -> 0 marks the slot as "being constructed".
        let last_ref_cnt = slot.borrow(Tracked(&slot_perm)).ref_count.compare_exchange(
            Tracked(&mut inner_perms.ref_count),
            REF_COUNT_UNUSED,
            0,
        ).map_err(
            |val|
                // `REF_COUNT_UNIQUE` and `0` here are constant patterns.
                match val {
                    REF_COUNT_UNIQUE => GetFrameError::Unique,
                    0 => GetFrameError::Busy,
                    _ => GetFrameError::InUse,
                },
        );

        if let Err(err) = last_ref_cnt {
            // CAS failed: restore the owner/permissions unchanged and bail out.
            proof {
                slot_own.sync_inner(&inner_perms);
                regions.slot_owners.tracked_insert(frame_to_index(paddr), slot_own);
                regions.slots.tracked_insert(frame_to_index(paddr), slot_perm);
            }

            return Err(err);
        }
        // SAFETY: The slot now has a reference count of `0`, other threads will
        // not access the metadata slot so it is safe to have a mutable reference.

        #[verus_spec(with Tracked(&mut inner_perms.storage), Tracked(&mut inner_perms.vtable_ptr))]
        slot.borrow(Tracked(&slot_perm)).write_meta(metadata);

        if as_unique_ptr {
            // No one can create a `Frame` instance directly from the page
            // address, so `Relaxed` is fine here.
            let mut contents = slot.take(Tracked(&mut slot_perm));
            contents.ref_count.store(Tracked(&mut inner_perms.ref_count), REF_COUNT_UNIQUE);
            slot.put(Tracked(&mut slot_perm), contents);
        } else {
            // `Release` is used to ensure that the metadata initialization
            // won't be reordered after this memory store.
            slot.borrow(Tracked(&slot_perm)).ref_count.store(
                Tracked(&mut inner_perms.ref_count),
                1,
            );
        }

        proof {
            // Record the new usage and reinsert the owner; `slot_perm` is not
            // reinserted — it is consumed into the returned typed permission.
            slot_own.usage = PageUsage::Frame;
            slot_own.sync_inner(&inner_perms);
            regions.slot_owners.tracked_insert(frame_to_index(paddr), slot_own);
            assert(regions.inv());
        }
        let tracked perm = MetaSlot::cast_perm::<M>(slot_perm, inner_perms);

        Ok((slot, Tracked(perm)))
    }
360
    /// The inner loop of `Self::get_from_in_use`.
    ///
    /// Performs one load-then-CAS attempt to bump the reference count; returns
    /// `Err(GetFrameError::Retry)` when the weak CAS fails so the caller can spin.
    /// # Verified Properties
    /// ## Preconditions
    /// - The permission must point to the slot.
    /// - The permission must be initialized.
    /// - **Liveness**: The reference count of the inner permissions must not be at the maximum.
    /// ## Postconditions
    /// - The reference count of the inner permissions is increased by one.
    #[verus_spec(res =>
        with Tracked(perm): Tracked<&vstd::simple_pptr::PointsTo<MetaSlot>>,
            Tracked(inner_perms): Tracked<&mut MetadataInnerPerms>,
        requires
            perm.pptr() == slot,
            perm.is_init(),
            perm.value().ref_count.id() == old(inner_perms).ref_count.id(),
            // In order to not panic, the reference count shouldn't be at the maximum.
            old(inner_perms).ref_count.value() + 1 < REF_COUNT_MAX,
        ensures
            res is Ok ==> final(inner_perms).ref_count.value() == old(inner_perms).ref_count.value() + 1,
            // On Ok, the old ref_count was > 0 (the 0 case returns Err(Busy)).
            res is Ok ==> old(inner_perms).ref_count.value() > 0,
            // On Ok, the returned PPtr is the slot argument.
            res matches Ok(ptr) ==> ptr == slot,
            res is Err ==> final(inner_perms).ref_count.value() == old(inner_perms).ref_count.value(),
            final(inner_perms).ref_count.id() == old(inner_perms).ref_count.id(),
            final(inner_perms).storage == old(inner_perms).storage,
            final(inner_perms).vtable_ptr == old(inner_perms).vtable_ptr,
            final(inner_perms).in_list == old(inner_perms).in_list,
    )]
    fn get_from_in_use_loop(slot: PPtr<MetaSlot>) -> Result<PPtr<Self>, GetFrameError> {
        // `REF_COUNT_UNUSED` / `REF_COUNT_UNIQUE` are constant patterns here.
        match slot.borrow(Tracked(perm)).ref_count.load(Tracked(&mut inner_perms.ref_count)) {
            REF_COUNT_UNUSED => {
                return Err(GetFrameError::Unused);
            },
            REF_COUNT_UNIQUE => {
                return Err(GetFrameError::Unique);
            },
            0 => {
                // The slot is mid-construction or mid-destruction.
                return Err(GetFrameError::Busy);
            },
            last_ref_cnt => {
                if last_ref_cnt >= REF_COUNT_MAX {
                    // See `Self::inc_ref_count` for the explanation.
                    vstd_extra::panic::panic_diverge();
                }
                // Using `Acquire` here to pair with `get_from_unused` or
                // `<Frame<M> as From<UniqueFrame<M>>>::from` (who must be
                // performed after writing the metadata).
                //
                // It ensures that the written metadata will be visible to us.

                if slot.borrow(Tracked(perm)).ref_count.compare_exchange_weak(
                    Tracked(&mut inner_perms.ref_count),
                    last_ref_cnt,
                    last_ref_cnt + 1,
                ).is_ok() {
                    return Ok(slot);
                } else {
                    // Weak CAS may fail spuriously; the caller spins and retries.
                    return Err(GetFrameError::Retry);
                }
            },
        }
    }
424
    /// Gets another owning pointer to the metadata slot from the given page.
    /// # Verified Properties
    /// ## Verification Design
    /// To simplify the verification, we verify the loop body separately from the outer loop. We do not prove termination.
    /// ## Preconditions
    /// - **Safety Invariant**: Metaslot region invariants must hold.
    /// - **Bookkeeping**: The slot permissions must be available in order to check the reference count.
    /// This precondition is stronger than it needs to be; absent permissions correspond to error cases.
    /// - **Liveness**: The reference count of the inner permissions must not be at the maximum, or the function will panic.
    /// ## Postconditions
    /// - **Safety**: Metaslot region invariants hold after the call.
    /// - **Correctness**: If successful, the slot's reference count is increased by one.
    /// - **Correctness**: If unsuccessful, the metaslot region remains unchanged.
    /// ## Safety
    /// The potential data race is avoided by the spin-lock.
    #[verus_spec(res =>
        with Tracked(regions): Tracked<&mut MetaRegionOwners>
        requires
            old(regions).inv(),
            !Self::get_from_in_use_panic_cond(paddr, *old(regions)),
            old(regions).slots.contains_key(frame_to_index(paddr)),
            old(regions).slot_owners.contains_key(frame_to_index(paddr)),
            old(regions).slots[frame_to_index(paddr)].addr() == frame_to_meta(paddr),
            old(regions).slot_owners[frame_to_index(paddr)].inner_perms.ref_count.id() ==
                old(regions).slots[frame_to_index(paddr)].value().ref_count.id(),
        ensures
            final(regions).inv(),
            !has_safe_slot(paddr) ==> res is Err,
            res is Ok ==> Self::get_from_in_use_success(paddr, *old(regions), *final(regions)),
            res matches Ok(ptr) ==> ptr == old(regions).slots[frame_to_index(paddr)].pptr(),
            res is Err ==> *final(regions) == *old(regions),
    )]
    #[verifier::exec_allows_no_decreases_clause]
    pub(super) fn get_from_in_use(paddr: Paddr) -> Result<PPtr<Self>, GetFrameError> {
        // Snapshot of the initial region state, used throughout the loop invariant.
        let ghost regions0 = *regions;

        let slot = get_slot(paddr)?;

        // get_slot doesn't modify regions
        proof { assert(regions0 == *old(regions)); }

        // Take this slot's owner/permissions out of the maps for the duration
        // of the spin loop; they are reinserted before returning.
        let tracked mut slot_own = regions.slot_owners.tracked_remove(frame_to_index(paddr));
        let tracked mut slot_perm = regions.slots.tracked_remove(frame_to_index(paddr));
        let tracked mut inner_perms = slot_own.take_inner_perms();

        // Reference count before any CAS attempt (unchanged across failed attempts).
        let ghost pre = inner_perms.ref_count.value();

        // Try to increase the reference count for an in-use frame. Otherwise fail.
        loop
            invariant
                has_safe_slot(paddr),
                slot_perm.addr() == slot.addr(),
                slot_perm.is_init(),
                slot_perm.value().ref_count.id() == inner_perms.ref_count.id(),
                inner_perms.ref_count.value() == pre,
                inner_perms.ref_count.value() + 1 < REF_COUNT_MAX,
                regions0.slots.contains_key(frame_to_index(paddr)),
                regions0.slot_owners.contains_key(frame_to_index(paddr)),
                regions0.inv(),
                regions0.slots[frame_to_index(paddr)] == slot_perm,
                !Self::get_from_in_use_panic_cond(paddr, *old(regions)),
                // Preserved fields of slot_own for inv() and wf() proofs
                slot_own.self_addr == regions0.slot_owners[frame_to_index(paddr)].self_addr,
                slot_own.usage == regions0.slot_owners[frame_to_index(paddr)].usage,
                slot_own.raw_count == regions0.slot_owners[frame_to_index(paddr)].raw_count,
                slot_own.paths_in_pt == regions0.slot_owners[frame_to_index(paddr)].paths_in_pt,
                FRAME_METADATA_RANGE.start <= slot_own.self_addr < FRAME_METADATA_RANGE.end,
                slot_own.self_addr % META_SLOT_SIZE == 0,
                slot_own.self_addr == slot_perm.addr(),
                // wf relation: slot cell ids match inner_perms ids
                slot_perm.value().storage.id() == inner_perms.storage.id(),
                slot_perm.value().vtable_ptr == inner_perms.vtable_ptr.pptr(),
                slot_perm.value().in_list.id() == inner_perms.in_list.id(),
                // inner_perms fields preserved across loop iterations
                inner_perms.storage == regions0.slot_owners[frame_to_index(paddr)].inner_perms.storage,
                inner_perms.vtable_ptr == regions0.slot_owners[frame_to_index(paddr)].inner_perms.vtable_ptr,
                inner_perms.in_list == regions0.slot_owners[frame_to_index(paddr)].inner_perms.in_list,
                // pre equals the original ref_count value
                pre == regions0.slot_owners[frame_to_index(paddr)].inner_perms.ref_count.value(),
                // regions0 equals old(regions)
                regions0 == *old(regions),
                // slot pptr matches what postcondition expects
                slot == regions0.slots[frame_to_index(paddr)].pptr(),
                // regions state: slot_owners and slots have idx removed
                regions.slot_owners == regions0.slot_owners.remove(frame_to_index(paddr)),
                regions.slots == regions0.slots.remove(frame_to_index(paddr)),
        {
            match #[verus_spec(with Tracked(&slot_perm), Tracked(&mut inner_perms))]
            Self::get_from_in_use_loop(slot) {
                Err(GetFrameError::Retry) => {
                    // Spurious weak-CAS failure: back off and retry.
                    core::hint::spin_loop();
                },
                res => {
                    // Definitive outcome (Ok or a non-Retry error): restore the
                    // region maps and prove the postconditions before returning.
                    proof {
                        let idx = frame_to_index(paddr);

                        assert(inner_perms.ref_count.id()
                            == regions0.slot_owners[idx].inner_perms.ref_count.id());

                        let ghost orig = regions0.slot_owners[idx];
                        assert(orig.inv());
                        assert(pre == orig.inner_perms.ref_count.value());

                        assert(inner_perms.vtable_ptr == orig.inner_perms.vtable_ptr);

                        if inner_perms.ref_count.value() > 0 {
                            // Either Ok (pre > 0) or Err with pre > 0
                            assert(pre > 0);
                            assert(0 < orig.inner_perms.ref_count.value());
                            assert(orig.inner_perms.ref_count.value() <= REF_COUNT_MAX);
                            assert(orig.inner_perms.vtable_ptr.is_init());
                            assert(inner_perms.vtable_ptr.is_init());
                        }

                        slot_own.sync_inner(&inner_perms);
                        assert(slot_own.inv());
                        assert(slot_perm.value().wf(slot_own));
                        assert(slot_own.self_addr == slot_perm.addr());

                        if res is Err {
                            assert(inner_perms.ref_count.value() == pre);
                            assert(inner_perms.ref_count.id() == orig.inner_perms.ref_count.id());
                            assert(inner_perms.storage == orig.inner_perms.storage);
                            assert(inner_perms.vtable_ptr == orig.inner_perms.vtable_ptr);
                            assert(inner_perms.in_list == orig.inner_perms.in_list);
                        }

                        regions.slot_owners.tracked_insert(idx, slot_own);
                        regions.slots.tracked_insert(idx, slot_perm);

                        assert(regions.slot_owners.dom() =~= regions0.slot_owners.dom());
                        assert(regions.slots =~= regions0.slots);

                        assert forall|i: usize| i != idx
                            implies #[trigger] regions.slot_owners[i] == regions0.slot_owners[i] by {};

                        assert(regions.slot_owners[idx].inner_perms.ref_count.id()
                            == regions0.slot_owners[idx].inner_perms.ref_count.id());
                        assert(regions.slot_owners[idx].inner_perms.storage
                            == regions0.slot_owners[idx].inner_perms.storage);
                        assert(regions.slot_owners[idx].inner_perms.vtable_ptr
                            == regions0.slot_owners[idx].inner_perms.vtable_ptr);
                        assert(regions.slot_owners[idx].inner_perms.in_list
                            == regions0.slot_owners[idx].inner_perms.in_list);
                        assert(regions.slot_owners[idx].self_addr
                            == regions0.slot_owners[idx].self_addr);
                        assert(regions.slot_owners[idx].usage
                            == regions0.slot_owners[idx].usage);
                        assert(regions.slot_owners[idx].raw_count
                            == regions0.slot_owners[idx].raw_count);

                        // For ptr postcondition: slot_perm.pptr() == old(regions).slots[idx].pptr()
                        assert(slot_perm == regions0.slots[idx]);

                        // For Err ==> *regions == *old(regions)
                        if res is Err {
                            // On Err, ref_count unchanged so slot_own == orig.
                            // Use extensional equality axiom for PermissionU64.
                            assert(regions.slot_owners[idx].inner_perms.ref_count.value()
                                == regions0.slot_owners[idx].inner_perms.ref_count.value());
                            assert(regions.slot_owners[idx].inner_perms.ref_count.id()
                                == regions0.slot_owners[idx].inner_perms.ref_count.id());
                            vstd_extra::auxiliary::axiom_permission_u64_ext_eq(
                                regions.slot_owners[idx].inner_perms.ref_count,
                                regions0.slot_owners[idx].inner_perms.ref_count,
                            );
                            assert(regions.slot_owners[idx] == regions0.slot_owners[idx]);
                            assert(regions.slot_owners =~= regions0.slot_owners);
                            assert(*regions == *old(regions));
                        }
                    }

                    return res;
                },
            }
        }
    }
602
    /// Increases the frame reference count by one.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - **Bookkeeping**: The permission must match the reference count being updated.
    /// - **Liveness**: The function will abort if the reference count is at the maximum.
    /// ## Postconditions
    /// - **Correctness**: The reference count is increased by one.
    /// ## Safety
    /// By requiring the caller to provide a permission for the reference count, we ensure that it already has a reference to the frame.
    #[verus_spec(
        with Tracked(rc_perm): Tracked<&mut PermissionU64>
    )]
    pub(super) fn inc_ref_count(&self)
        requires
            old(rc_perm).is_for(self.ref_count),
            !Self::inc_ref_count_panic_cond(*old(rc_perm)),
        ensures
            final(rc_perm).value() == old(rc_perm).value() + 1,
            final(rc_perm).id() == old(rc_perm).id(),
    {
        let last_ref_cnt = self.ref_count.fetch_add(Tracked(rc_perm), 1);

        if last_ref_cnt >= REF_COUNT_MAX {
            // This follows the same principle as the `Arc::clone` implementation to prevent the
            // reference count from overflowing. See also
            // <https://doc.rust-lang.org/std/sync/struct.Arc.html#method.clone>.
            #[cfg(feature = "allow_panic")]
            crate::panic::abort();
            // Verus proof assert: presumably the `!inc_ref_count_panic_cond`
            // precondition makes this branch unreachable at verification time.
            assert(false);
        }
    }
635
    /// Gets the corresponding frame's physical address.
    ///
    /// Computed as `meta_to_frame` of the slot's own (metadata-region) address.
    /// # Verified Properties
    /// ## Preconditions
    /// - **Safety**: The permission must point to a valid metadata slot.
    /// - **Correctness**: The permission must point to the metadata slot.
    /// ## Postconditions
    /// - **Correctness**: The function returns the physical address of the frame.
    /// ## Safety
    /// The safety precondition requires that the permission points to a valid metadata slot.
    /// This is an internal function, so it is fine to require the caller to verify this.
    #[verus_spec(
        with Tracked(perm): Tracked<&vstd::simple_pptr::PointsTo<MetaSlot>>,
        requires
            perm.value() == self,
            Self::frame_paddr_safety_cond(*perm),
        returns
            meta_to_frame(perm.addr()),
    )]
    pub(super) fn frame_paddr(&self) -> (pa: Paddr) {
        // `addr_of` yields the slot's address; the mapping module converts it
        // back to the frame's physical address.
        let addr = self.addr_of(Tracked(perm));
        meta_to_frame(addr)
    }
659
660    /*
661    /// Gets a dynamically typed pointer to the stored metadata.
662    ///
663    /// # Safety
664    ///
665    /// The caller should ensure that:
666    ///  - the stored metadata is initialized (by [`Self::write_meta`]) and valid.
667    ///
668    /// The returned pointer should not be dereferenced as mutable unless having
669    /// exclusive access to the metadata slot.
670
671    #[verifier::external_body]
672    pub(super) unsafe fn dyn_meta_ptr<M: AnyFrameMeta>(&self) -> PPtr<M> {
673        unimplemented!()
674
675        // SAFETY: The page metadata is valid to be borrowed immutably, since
676        // it will never be borrowed mutably after initialization.
677        let vtable_ptr = unsafe { *self.vtable_ptr.get() };
678
679        // SAFETY: The page metadata is initialized and valid.
680        let vtable_ptr = *unsafe { vtable_ptr.assume_init_ref() };
681
682        let meta_ptr: *mut dyn AnyFrameMeta =
683            core::ptr::from_raw_parts_mut(self as *const MetaSlot as *mut MetaSlot, vtable_ptr);
684
685        meta_ptr
686    }*/
    /// Gets the stored metadata as type `M`.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - **Safety**: The caller must provide an existing permission that
    ///   matches the contents of the metadata slot (`self == perm.value()`).
    /// ## Postconditions
    /// - **Correctness**: The function returns a pointer to the stored
    ///   metadata, of type `M`, whose address equals the slot's address.
    /// ## Safety
    /// - Calling the method is always safe, but using the returned pointer
    ///   could be unsafe. Specifically, the dereferencer should ensure that:
    ///  - the stored metadata is initialized (by [`Self::write_meta`]) and valid;
    ///  - the initialized metadata is of type `M` (`Repr<M>::wf`);
    ///  - the returned pointer should not be dereferenced as mutable unless having exclusive access to the metadata slot.
    #[verus_spec(
        with Tracked(perm): Tracked<&vstd::simple_pptr::PointsTo<MetaSlot>>
    )]
    pub(super) fn as_meta_ptr<M: AnyFrameMeta + Repr<MetaSlotStorage>>(&self) -> (res: ReprPtr<
        MetaSlot,
        Metadata<M>,
    >)
        requires
            self == perm.value(),
        ensures
            res.ptr.addr() == perm.addr(),
            res.addr() == perm.addr(),
    {
        // Obtain this slot's address from the permission, then reinterpret
        // the slot as `Metadata<M>` at that same address. This is a typed
        // cast of the pointer only; no data is read or copied here.
        let addr = self.addr_of(Tracked(perm));
        self.cast_slot(addr, Tracked(perm))
    }
716
    /// Writes the metadata to the slot without reading or dropping the previous value.
    ///
    /// # Verification Design
    /// This function is axiomatized for now (`#[verifier::external_body]`)
    /// because of trait constraints.
    /// ## Preconditions
    /// - The caller must provide the permission token to the metadata slot's
    ///   storage (`meta_perm`) and to the vtable pointer (`vtable_perm`,
    ///   which must be uninitialized).
    /// ## Postconditions
    /// - The storage permission is initialized to the new metadata, and the
    ///   vtable permission becomes initialized.
    /// ## Safety
    /// The caller must have exclusive access to the metadata slot's storage in order to provide the permission token.
    ///
    /// NOTE(review): the actual store into `self.storage` (and the vtable
    /// write) is commented out below; since the function is
    /// `#[verifier::external_body]`, this body currently performs no runtime
    /// write — confirm whether this is an intentional stub.
    #[verus_spec(
        with Tracked(meta_perm): Tracked<&mut vstd::cell::pcell_maybe_uninit::PointsTo<MetaSlotStorage>>,
            Tracked(vtable_perm): Tracked<&mut vstd::simple_pptr::PointsTo<usize>>
        requires
            self.storage.id() === old(meta_perm).id(),
            self.vtable_ptr == old(vtable_perm).pptr(),
            old(vtable_perm).is_uninit(),
        ensures
            final(meta_perm).id() == old(meta_perm).id(),
            final(meta_perm).is_init(),
            final(vtable_perm).pptr() == old(vtable_perm).pptr(),
            final(vtable_perm).is_init(),
            Metadata::<M>::metadata_from_inner_perms(*final(meta_perm)) == metadata,
    )]
    #[verifier::external_body]
    pub(super) fn write_meta<M: AnyFrameMeta + Repr<MetaSlotStorage> + OwnerOf>(
        &self,
        metadata: M,
    ) {
        //        const { assert!(size_of::<M>() <= FRAME_METADATA_MAX_SIZE) };
        //        const { assert!(align_of::<M>() <= FRAME_METADATA_MAX_ALIGN) };
        // SAFETY: Caller ensures that the access to the fields are exclusive.
        //        let vtable_ptr = unsafe { &mut *self.vtable_ptr.get() };
        //        vtable_ptr.write(core::ptr::metadata(&metadata as &dyn AnyFrameMeta));
        let ptr = &self.storage;

        // SAFETY:
        // 1. `ptr` points to the metadata storage.
        // 2. The size and the alignment of the metadata storage is large enough to hold `M`
        //    (guaranteed by the const assertions above).
        // 3. We have exclusive access to the metadata storage (guaranteed by the caller).
        //ReprPtr::<MetaSlot, M>::new_borrowed(ptr).put(Tracked(slot_own.storage.borrow_mut()), &metadata);
    }
760
    /// Drops the metadata and deallocates the frame.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - **Safety Invariant**: The metadata slot must satisfy the safety invariants.
    /// - **Safety**: The caller must provide an owner object for the metadata
    ///   slot, which must include the permission for the slot's `ref_count`
    ///   field.
    /// - **Safety**: The owner must satisfy [`drop_last_in_place_safety_cond`],
    ///   which ensures that its reference count is 0 and it has no dangling
    ///   raw pointers.
    /// ## Postconditions
    /// - **Safety**: The metadata slot satisfies the safety invariants after the call.
    /// - **Correctness**: The reference count is set to `REF_COUNT_UNUSED` and the contents of the slot are uninitialized.
    /// ## Safety
    /// - By requiring the caller to provide an owner object, we ensure that it already has a reference to the frame.
    /// - The safety precondition ensures that there are no dangling pointers, including raw pointers, guaranteeing temporal safety.
    #[verus_spec(
        with Tracked(owner): Tracked<&mut MetaSlotOwner>
    )]
    pub(super) fn drop_last_in_place(&self)
        requires
            old(owner).inv(),
            self.ref_count.id() == old(owner).inner_perms.ref_count.id(),
            Self::drop_last_in_place_safety_cond(*old(owner)),
        ensures
            final(owner).inv(),
            final(owner).inner_perms.ref_count.value() == REF_COUNT_UNUSED,
            final(owner).inner_perms.ref_count.id() == old(owner).inner_perms.ref_count.id(),
            final(owner).inner_perms.storage.id() == old(owner).inner_perms.storage.id(),
            final(owner).inner_perms.storage.is_uninit(),
            final(owner).inner_perms.vtable_ptr.is_uninit(),
            final(owner).inner_perms.vtable_ptr.pptr() == old(owner).inner_perms.vtable_ptr.pptr(),
            final(owner).inner_perms.in_list == old(owner).inner_perms.in_list,
            final(owner).self_addr == old(owner).self_addr,
            final(owner).usage == old(owner).usage,
            final(owner).raw_count == old(owner).raw_count,
            final(owner).paths_in_pt == old(owner).paths_in_pt,
    {
        // This should be guaranteed as a safety requirement.
        //        debug_assert_eq!(self.ref_count.load(Tracked(&*rc_perm)), 0);
        // SAFETY: The caller ensures safety.
        // Drop the stored metadata first; after this the slot's storage and
        // vtable pointer are uninitialized.
        #[verus_spec(with Tracked(owner))]
        self.drop_meta_in_place();

        // Temporarily extract the inner permissions from the owner so the
        // atomic store below can take the `ref_count` permission by `&mut`.
        let tracked mut inner_perms = owner.take_inner_perms();

        // `Release` pairs with the `Acquire` in `Frame::from_unused` and ensures
        // `drop_meta_in_place` won't be reordered after this memory store.
        self.ref_count.store(Tracked(&mut inner_perms.ref_count), REF_COUNT_UNUSED);

        proof {
            // Hand the (updated) inner permissions back to the owner so its
            // invariant holds again on return.
            owner.sync_inner(&inner_perms);
        }
    }
814
815    /// Drops the metadata of a slot in place.
816    ///
817    /// After this operation, the metadata becomes uninitialized. Any access to the
818    /// metadata is undefined behavior unless it is re-initialized by [`Self::write_meta`].
819    ///
820    /// # Verification Design
821    /// This function is axiomatized because of its reliance on dynamic trait methods and `VmReader`.
822    /// The latter dependency makes it part of the "bootstrap gap".
823    /// Now that Verus better supports the `dyn Trait` pattern and we have verified `VmReader`, we can revisit it.
824    /// ## Preconditions
825    /// - The caller must provide an owner object for the metadata slot.
826    /// - The reference count must be 0
827    /// ## Safety
828    ///
829    /// The caller should ensure that:
830    ///  - the reference count is `0` (so we are the sole owner of the frame);
831    ///  - the metadata is initialized;
832    #[verifier::external_body]
833    #[verus_spec(
834        with Tracked(slot_own): Tracked<&mut MetaSlotOwner>
835        requires
836            old(slot_own).inner_perms.ref_count.value() == 0 || old(slot_own).inner_perms.ref_count.value() == REF_COUNT_UNIQUE,
837            old(slot_own).inner_perms.storage.is_init(),
838            old(slot_own).inner_perms.in_list.value() == 0,
839        ensures
840            final(slot_own).inner_perms.ref_count == old(slot_own).inner_perms.ref_count,
841            final(slot_own).inner_perms.storage.is_uninit(),
842            final(slot_own).inner_perms.storage.id() == old(slot_own).inner_perms.storage.id(),
843            final(slot_own).inner_perms.in_list == old(slot_own).inner_perms.in_list,
844            final(slot_own).inner_perms.vtable_ptr.is_uninit(),
845            final(slot_own).inner_perms.vtable_ptr.pptr() == old(slot_own).inner_perms.vtable_ptr.pptr(),
846            final(slot_own).self_addr == old(slot_own).self_addr,
847            final(slot_own).usage == old(slot_own).usage,
848            final(slot_own).raw_count == old(slot_own).raw_count,
849            final(slot_own).paths_in_pt == old(slot_own).paths_in_pt,
850    )]
851    #[verifier::external_body]
852    pub(super) fn drop_meta_in_place(&self) {
853        unimplemented!()/*        let paddr = self.frame_paddr();
854
855        // SAFETY: We have exclusive access to the frame metadata.
856        let vtable_ptr = unsafe { &mut *self.vtable_ptr.get() };
857        // SAFETY: The frame metadata is initialized and valid.
858        let vtable_ptr = unsafe { vtable_ptr.assume_init_read() };
859
860        let meta_ptr: *mut dyn AnyFrameMeta =
861            core::ptr::from_raw_parts_mut(self.storage.get(), vtable_ptr);
862
863        // SAFETY: The implementer of the frame metadata decides that if the frame
864        // is safe to be read or not.
865        let mut reader =
866            unsafe { VmReader::from_kernel_space(paddr_to_vaddr(paddr) as *const u8, PAGE_SIZE) };
867
868        // SAFETY: `ptr` points to the metadata storage which is valid to be mutably borrowed under
869        // `vtable_ptr` because the metadata is valid, the vtable is correct, and we have the exclusive
870        // access to the frame metadata.
871        unsafe {
872            // Invoke the custom `on_drop` handler.
873            (*meta_ptr).on_drop(&mut reader);
874            // Drop the frame metadata.
875            core::ptr::drop_in_place(meta_ptr);
876        }*/
877
878    }
879}
880
/// The metadata of the frames that themselves hold frame metadata
/// (i.e., the "meta pages" backing the global metadata slot array).
#[derive(Debug, Default)]
pub struct MetaPageMeta {}
884
885//impl_frame_meta_for!(MetaPageMeta);
886/*
887/// Initializes the metadata of all physical frames.
888///
889/// The function returns a list of `Frame`s containing the metadata.
890///
891/// # Safety
892///
893/// This function should be called only once and only on the BSP,
894/// before any APs are started.
895pub(crate) unsafe fn init() -> Segment<MetaPageMeta> {
896    let max_paddr = {
897        let regions = &crate::boot::EARLY_INFO.get().unwrap().memory_regions;
898        regions
899            .iter()
900            .filter(|r| r.typ() == MemoryRegionType::Usable)
901            .map(|r| r.base() + r.len())
902            .max()
903            .unwrap()
904    };
905
906    info!(
907        "Initializing frame metadata for physical memory up to {:x}",
908        max_paddr
909    );
910
911    // In RISC-V, the boot page table has mapped the 512GB memory,
912    // so we don't need to add temporary linear mapping.
913    // In LoongArch, the DWM0 has mapped the whole memory,
914    // so we don't need to add temporary linear mapping.
915    #[cfg(target_arch = "x86_64")]
916    add_temp_linear_mapping(max_paddr);
917
918    let tot_nr_frames = max_paddr / page_size::<PagingConsts>(1);
919    let (nr_meta_pages, meta_pages) = alloc_meta_frames(tot_nr_frames);
920
921    // Map the metadata frames.
922    boot_pt::with_borrow(|boot_pt| {
923        for i in 0..nr_meta_pages {
924            let frame_paddr = meta_pages + i * PAGE_SIZE;
925            let vaddr = frame_to_meta::<PagingConsts>(0) + i * PAGE_SIZE;
926            let prop = PageProperty {
927                flags: PageFlags::RW,
928                cache: CachePolicy::Writeback,
929                priv_flags: PrivilegedPageFlags::GLOBAL,
930            };
931            // SAFETY: we are doing the metadata mappings for the kernel.
932            unsafe { boot_pt.map_base_page(vaddr, frame_paddr / PAGE_SIZE, prop) };
933        }
934    })
935    .unwrap();
936
937    // Now the metadata frames are mapped, we can initialize the metadata.
938    super::MAX_PADDR.store(max_paddr, Ordering::Relaxed);
939
940    let meta_page_range = meta_pages..meta_pages + nr_meta_pages * PAGE_SIZE;
941
942    let (range_1, range_2) = allocator::EARLY_ALLOCATOR
943        .lock()
944        .as_ref()
945        .unwrap()
946        .allocated_regions();
947    for r in range_difference(&range_1, &meta_page_range) {
948        let early_seg = Segment::from_unused(r, |_| EarlyAllocatedFrameMeta).unwrap();
949        let _ = ManuallyDrop::new(early_seg);
950    }
951    for r in range_difference(&range_2, &meta_page_range) {
952        let early_seg = Segment::from_unused(r, |_| EarlyAllocatedFrameMeta).unwrap();
953        let _ = ManuallyDrop::new(early_seg);
954    }
955
956    mark_unusable_ranges();
957
958    Segment::from_unused(meta_page_range, |_| MetaPageMeta {}).unwrap()
959}
960
961/// Returns whether the global frame allocator is initialized.
962pub(in crate::mm) fn is_initialized() -> bool {
963    // `init` sets it with relaxed ordering somewhere in the middle. But due
964    // to the safety requirement of the `init` function, we can assume that
965    // there is no race conditions.
966    super::MAX_PADDR.load(Ordering::Relaxed) != 0
967}
968
969fn alloc_meta_frames(tot_nr_frames: usize) -> (usize, Paddr) {
970    let nr_meta_pages = tot_nr_frames
971        .checked_mul(size_of::<MetaSlot>())
972        .unwrap()
973        .div_ceil(PAGE_SIZE);
974    let start_paddr = allocator::early_alloc(
975        Layout::from_size_align(nr_meta_pages * PAGE_SIZE, PAGE_SIZE).unwrap(),
976    )
977    .unwrap();
978
979    let slots = paddr_to_vaddr(start_paddr) as *mut MetaSlot;
980
981    // Initialize the metadata slots.
982    for i in 0..tot_nr_frames {
983        // SAFETY: The memory is successfully allocated with `tot_nr_frames`
984        // slots so the index must be within the range.
985        let slot = unsafe { slots.add(i) };
986        // SAFETY: The memory is just allocated so we have exclusive access and
987        // it's valid for writing.
988        unsafe {
989            slot.write(MetaSlot {
990                storage: UnsafeCell::new(MetaSlotStorage::Empty([0; FRAME_METADATA_MAX_SIZE - 1])),
991                ref_count: AtomicU64::new(REF_COUNT_UNUSED),
992                vtable_ptr: UnsafeCell::new(MaybeUninit::uninit()),
993                in_list: AtomicU64::new(0),
994            })
995        };
996    }
997
998    (nr_meta_pages, start_paddr)
999}
1000
1001/// Unusable memory metadata. Cannot be used for any purposes.
1002#[derive(Debug)]
1003pub struct UnusableMemoryMeta;
1004impl_frame_meta_for!(UnusableMemoryMeta);
1005
1006/// Reserved memory metadata. Maybe later used as I/O memory.
1007#[derive(Debug)]
1008pub struct ReservedMemoryMeta;
1009impl_frame_meta_for!(ReservedMemoryMeta);
1010
1011/// The metadata of physical pages that contains the kernel itself.
1012#[derive(Debug, Default)]
1013pub struct KernelMeta;
1014impl_frame_meta_for!(KernelMeta);
1015
1016macro_rules! mark_ranges {
1017    ($region: expr, $typ: expr) => {{
1018        debug_assert!($region.base() % PAGE_SIZE == 0);
1019        debug_assert!($region.len() % PAGE_SIZE == 0);
1020
1021        let seg = Segment::from_unused($region.base()..$region.end(), |_| $typ).unwrap();
1022        let _ = ManuallyDrop::new(seg);
1023    }};
1024}
1025
1026fn mark_unusable_ranges() {
1027    let regions = &crate::boot::EARLY_INFO.get().unwrap().memory_regions;
1028
1029    for region in regions
1030        .iter()
1031        .rev()
1032        .skip_while(|r| r.typ() != MemoryRegionType::Usable)
1033    {
1034        match region.typ() {
1035            MemoryRegionType::BadMemory => mark_ranges!(region, UnusableMemoryMeta),
1036            MemoryRegionType::Unknown => mark_ranges!(region, ReservedMemoryMeta),
1037            MemoryRegionType::NonVolatileSleep => mark_ranges!(region, UnusableMemoryMeta),
1038            MemoryRegionType::Reserved => mark_ranges!(region, ReservedMemoryMeta),
1039            MemoryRegionType::Kernel => mark_ranges!(region, KernelMeta),
1040            MemoryRegionType::Module => mark_ranges!(region, UnusableMemoryMeta),
1041            MemoryRegionType::Framebuffer => mark_ranges!(region, ReservedMemoryMeta),
1042            MemoryRegionType::Reclaimable => mark_ranges!(region, UnusableMemoryMeta),
1043            MemoryRegionType::Usable => {} // By default it is initialized as usable.
1044        }
1045    }
1046}
1047
1048/// Adds a temporary linear mapping for the metadata frames.
1049///
1050/// We only assume boot page table to contain 4G linear mapping. Thus if the
1051/// physical memory is huge we end up depleted of linear virtual memory for
1052/// initializing metadata.
1053#[cfg(target_arch = "x86_64")]
1054fn add_temp_linear_mapping(max_paddr: Paddr) {
1055    const PADDR4G: Paddr = 0x1_0000_0000;
1056
1057    if max_paddr <= PADDR4G {
1058        return;
1059    }
1060
1061    // TODO: We don't know if the allocator would allocate from low to high or
1062    // not. So we prepare all linear mappings in the boot page table. Hope it
1063    // won't drag the boot performance much.
1064    let end_paddr = max_paddr.align_up(PAGE_SIZE);
1065    let prange = PADDR4G..end_paddr;
1066    let prop = PageProperty {
1067        flags: PageFlags::RW,
1068        cache: CachePolicy::Writeback,
1069        priv_flags: PrivilegedPageFlags::GLOBAL,
1070    };
1071
1072    // SAFETY: we are doing the linear mapping for the kernel.
1073    unsafe {
1074        boot_pt::with_borrow(|boot_pt| {
1075            for paddr in prange.step_by(PAGE_SIZE) {
1076                let vaddr = LINEAR_MAPPING_BASE_VADDR + paddr;
1077                boot_pt.map_base_page(vaddr, paddr / PAGE_SIZE, prop);
1078            }
1079        })
1080        .unwrap();
1081    }
1082}
1083*/
1084} // verus!