// File: ostd/mm/frame/meta.rs

1// SPDX-License-Identifier: MPL-2.0
2//! Metadata management of frames.
3//!
4//! You can picture a globally shared, static, gigantic array of metadata
5//! initialized for each frame.
6//! Each entry in this array holds the metadata for a single frame.
7//! There is a dedicated small
8//! "heap" space in each slot for dynamic metadata. You can store anything as
9//! the metadata of a frame as long as it's [`Sync`].
10//!
11//! # Implementation
12//!
13//! The slots are placed in the metadata pages mapped to a certain virtual
14//! address in the kernel space. So finding the metadata of a frame often
15//! comes at no cost, since the translation is a simple arithmetic operation.
16use vstd::prelude::*;
17
18pub mod mapping;
19
20use self::mapping::{frame_to_index, frame_to_meta, meta_to_frame, META_SLOT_SIZE};
21use crate::specs::mm::frame::meta_owners::{MetaSlotOwner, PageUsage};
22use crate::specs::mm::frame::meta_region_owners::MetaRegionOwners;
23
24use vstd::atomic::{PAtomicU64, PAtomicU8, PermissionU64};
25use vstd::cell::{self, PCell};
26
27use vstd::simple_pptr::{self, PPtr};
28use vstd_extra::cast_ptr::*;
29use vstd_extra::ownership::*;
30
31use core::{
32    alloc::Layout,
33    any::Any,
34    cell::UnsafeCell,
35    fmt::Debug,
36    marker::PhantomData,
37    mem::{align_of, size_of, ManuallyDrop, MaybeUninit},
38    result::Result,
39    sync::atomic::{AtomicU64, AtomicU8, Ordering},
40};
41
42//use align_ext::AlignExt;
43//use log::info;
44
45use crate::{
46    //    boot::memory_region::MemoryRegionType,
47    //    const_assert,
48    mm::{
49        //        frame::allocator::{self, EarlyAllocatedFrameMeta},
50        paddr_to_vaddr,
51        //        page_table::boot_pt,
52        page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
53        /*Infallible,*/ Paddr,
54        PagingLevel,
55        //Segment,
56        Vaddr,
57        MAX_NR_PAGES,
58        MAX_PADDR,
59        /*VmReader,*/ PAGE_SIZE,
60    },
61    specs::arch::kspace::FRAME_METADATA_RANGE,
62    //    panic::abort,
63    //    util::ops::range_difference,
64};
65
66/// The maximum number of bytes of the metadata of a frame.
// The slot size minus two `AtomicU64` words, matching the `ref_count` and
// `in_list` fields of the (commented-out) `MetaSlot` layout above; the
// vtable-pointer term is currently disabled.
67pub const FRAME_METADATA_MAX_SIZE: usize = META_SLOT_SIZE()
68    - size_of::<AtomicU64>()
69//    - size_of::<FrameMetaVtablePtr>()
70    - size_of::<AtomicU64>();
71/// The maximum alignment in bytes of the metadata of a frame.
72pub const FRAME_METADATA_MAX_ALIGN: usize = META_SLOT_SIZE();
73
74//const META_SLOT_SIZE: usize = 64;
75
76/*#[repr(C)]
77pub struct MetaSlot {
78    /// The metadata of a frame.
79    ///
80    /// It is placed at the beginning of a slot because:
81    ///  - the implementation can simply cast a `*const MetaSlot`
82    ///    to a `*const AnyFrameMeta` for manipulation;
83    ///  - if the metadata need special alignment, we can provide
84    ///    at most `PAGE_METADATA_ALIGN` bytes of alignment;
85    ///  - the subsequent fields can utilize the padding of the
86    ///    reference count to save space.
87    ///
88    /// Don't interpret this field as an array of bytes. It is a
89    /// placeholder for the metadata of a frame.
90    storage: UnsafeCell<MetaSlotStorage>,
91    /// The reference count of the page.
92    ///
93    /// Specifically, the reference count has the following meaning:
94    ///  - `REF_COUNT_UNUSED`: The page is not in use.
95    ///  - `REF_COUNT_UNIQUE`: The page is owned by a [`UniqueFrame`].
96    ///  - `0`: The page is being constructed ([`Frame::from_unused`])
97    ///    or destructured ([`drop_last_in_place`]).
98    ///  - `1..REF_COUNT_MAX`: The page is in use.
99    ///  - `REF_COUNT_MAX..REF_COUNT_UNIQUE`: Illegal values to
100    ///    prevent the reference count from overflowing. Otherwise,
101    ///    overflowing the reference count will cause soundness issue.
102    ///
103    /// [`Frame::from_unused`]: super::Frame::from_unused
104    /// [`UniqueFrame`]: super::unique::UniqueFrame
105    /// [`drop_last_in_place`]: Self::drop_last_in_place
106    //
107    // Other than this field the fields should be `MaybeUninit`.
108    // See initialization in `alloc_meta_frames`.
109    pub(super) ref_count: AtomicU64,
110    /// The virtual table that indicates the type of the metadata.
111//    pub(super) vtable_ptr: UnsafeCell<MaybeUninit<FrameMetaVtablePtr>>,
112    /// This is only accessed by [`crate::mm::frame::linked_list`].
113    /// It stores 0 if the frame is not in any list, otherwise it stores the
114    /// ID of the list.
115    ///
116    /// It is ugly but allows us to tell if a frame is in a specific list by
117    /// one relaxed read. Otherwise, if we store it conditionally in `storage`
118    /// we would have to ensure that the type is correct before the read, which
119    /// costs a synchronization.
120    pub(super) in_list: AtomicU64,
121}*/
122
123verus! {
124
/// Sentinel reference count: the frame is not in use.
125pub const REF_COUNT_UNUSED: u64 = u64::MAX;
126
/// Sentinel reference count: the frame is exclusively owned by a
/// `UniqueFrame` (see the commented-out `MetaSlot::ref_count` docs above).
127pub const REF_COUNT_UNIQUE: u64 = u64::MAX - 1;
128
/// Upper bound on legal reference counts. Values in
/// `REF_COUNT_MAX..REF_COUNT_UNIQUE` are treated as illegal so the count
/// can never overflow into the sentinel values.
129pub const REF_COUNT_MAX: u64 = i64::MAX as u64;
130
/// Place-holder alias for the `dyn AnyFrameMeta` vtable pointer type.
131type FrameMetaVtablePtr = core::ptr::DynMetadata<dyn AnyFrameMeta>;
132
133} // verus!
134//const_assert!(PAGE_SIZE % META_SLOT_SIZE == 0);
135//const_assert!(size_of::<MetaSlot>() == META_SLOT_SIZE);
136/*/// Makes a structure usable as a frame metadata.
137#[macro_export]
138macro_rules! impl_frame_meta_for {
139    // Implement without specifying the drop behavior.
140    ($t:ty) => {
141        // SAFETY: `on_drop` won't read the page.
142        unsafe impl $crate::mm::frame::meta::AnyFrameMeta for $t {}
143
144/*        $crate::const_assert!(
145            core::mem::size_of::<$t>() <= $crate::mm::frame::meta::FRAME_METADATA_MAX_SIZE
146        );
147        $crate::const_assert!(
148            $crate::mm::frame::meta::FRAME_METADATA_MAX_ALIGN % core::mem::align_of::<$t>() == 0
149        );*/
150    };
151}
152
153pub use impl_frame_meta_for;
154*/
155verus! {
156
157/// The error type for getting the frame from a physical address.
///
/// Returned by [`get_slot`], [`MetaSlot::get_from_unused`] and
/// [`MetaSlot::get_from_in_use`].
158#[derive(Debug)]
159pub enum GetFrameError {
160    /// The frame is in use.
161    InUse,
162    /// The frame is not in use.
163    Unused,
164    /// The frame is being initialized or destructed.
165    Busy,
166    /// The frame is private to an owner of [`UniqueFrame`].
167    ///
168    /// [`UniqueFrame`]: super::unique::UniqueFrame
169    Unique,
170    /// The provided physical address is out of bound.
171    OutOfBound,
172    /// The provided physical address is not aligned.
173    NotAligned,
174    /// Verification only: `compare_exchange` returned `Err`, retry
175    Retry,
176}
177
/// Spec-level counterpart of [`get_slot`]: the metadata-slot pointer for
/// the frame at `paddr`, computed by the `frame_to_meta` translation.
178pub open spec fn get_slot_spec(paddr: Paddr) -> (res: PPtr<MetaSlot>)
179    recommends
180        paddr % 4096 == 0,
181        paddr < MAX_PADDR(),
182{
183    let slot = frame_to_meta(paddr);
184    PPtr(slot, PhantomData::<MetaSlot>)
185}
186
/// In-slot metadata stored for a page-table node page
/// (the `MetaSlotStorage::PTNode` variant).
187pub struct StoredPageTablePageMeta {
    // Child counter; interior-mutable via `PCell`.
188    pub nr_children: PCell<u16>,
    // Stray flag — presumably marks a node detached from the page table;
    // TODO confirm exact semantics against the page-table module.
189    pub stray: PCell<bool>,
    // Paging level of this node.
190    pub level: PagingLevel,
    // Atomic lock-state byte for the node.
191    pub lock: PAtomicU8,
192}
193
/// Tagged union of every frame-metadata type stored inside a [`MetaSlot`].
194pub enum MetaSlotStorage {
    /// No meaningful metadata; byte padding sized to the storage area.
195    Empty([u8; 39]),
    /// Metadata of a frame kept in a linked list.
196    FrameLink(super::linked_list::StoredLink),
    /// Metadata of a page-table node page.
197    PTNode(StoredPageTablePageMeta),
198}
199
200impl MetaSlotStorage {
    /// Spec: the stored link if `self` is `FrameLink`, otherwise `None`.
201    pub open spec fn get_link_spec(self) -> Option<super::linked_list::StoredLink> {
202        match self {
203            MetaSlotStorage::FrameLink(link) => Some(link),
204            _ => None,
205        }
206    }

    /// Exec twin of [`Self::get_link_spec`] (used as spec when called in
    /// ghost code, per the attribute below).
208    #[verifier::when_used_as_spec(get_link_spec)]
209    pub fn get_link(self) -> (res: Option<super::linked_list::StoredLink>)
210        ensures
211            res == self.get_link_spec(),
212    {
213        match self {
214            MetaSlotStorage::FrameLink(link) => Some(link),
215            _ => None,
216        }
217    }

    /// Spec: the page-table node metadata if `self` is `PTNode`, else `None`.
219    pub open spec fn get_node_spec(self) -> Option<StoredPageTablePageMeta> {
220        match self {
221            MetaSlotStorage::PTNode(node) => Some(node),
222            _ => None,
223        }
224    }

    /// Exec twin of [`Self::get_node_spec`].
226    #[verifier::when_used_as_spec(get_node_spec)]
227    pub fn get_node(self) -> (res: Option<StoredPageTablePageMeta>)
228        ensures
229            res == self.get_node_spec(),
230    {
231        match self {
232            MetaSlotStorage::PTNode(node) => Some(node),
233            _ => None,
234        }
235    }
236}
237
238/// Space-holder of the AnyFrameMeta virtual table.
///
/// A type implementing this can serve as the metadata of a frame; the
/// `Repr<MetaSlot>` bound means it must be representable inside a slot.
239pub trait AnyFrameMeta: Repr<MetaSlot> {
    /// Hook run when the metadata is dropped; defaults to a no-op.
240    exec fn on_drop(&mut self) {
241    }

    /// Whether the frame holds untyped memory; defaults to `false`.
243    exec fn is_untyped(&self) -> bool {
244        false
245    }

    /// Ghost view of the vtable pointer identifying the metadata type.
247    spec fn vtable_ptr(&self) -> usize;
248}
249
250/// `MetaSlotStorage` is an inductive tagged union of all of the frame meta types that
251/// we work with in this development. So, it should itself implement `AnyFrameMeta`, and
252/// it can then be used to stand in for `dyn AnyFrameMeta`.
253impl AnyFrameMeta for MetaSlotStorage {
    // `on_drop` and `is_untyped` use the trait defaults; only the ghost
    // vtable view is declared, and it is left uninterpreted.
254    uninterp spec fn vtable_ptr(&self) -> usize;
255}
256
// Representation mapping between `MetaSlotStorage` and the raw `MetaSlot`.
// NOTE(review): the mapping is currently fully axiomatized — the spec fns
// are uninterpreted, the round-trip proofs are `admit()`ed, and the exec
// casts are `todo!()` stubs that panic if ever called at runtime.
257impl Repr<MetaSlot> for MetaSlotStorage {
258    uninterp spec fn wf(slot: MetaSlot) -> bool;

260    uninterp spec fn to_repr_spec(self) -> MetaSlot;

    // Exec stub; panics if called.
262    #[verifier::external_body]
263    fn to_repr(self) -> MetaSlot {
264        todo!()
265    }

267    uninterp spec fn from_repr_spec(slot: MetaSlot) -> Self;

    // Exec stub; panics if called.
269    #[verifier::external_body]
270    fn from_repr(slot: MetaSlot) -> Self {
271        todo!()
272    }

    // Exec stub; panics if called.
274    #[verifier::external_body]
275    fn from_borrowed<'a>(slot: &'a MetaSlot) -> &'a Self {
276        todo!()
277    }

    // Round-trip laws, currently admitted.
279    proof fn from_to_repr(self) {
280        admit()
281    }

283    proof fn to_from_repr(slot: MetaSlot) {
284        admit()
285    }

287    proof fn to_repr_wf(self) {
288        admit()
289    }
290}
291
/// The verified model of a frame's metadata slot.
///
/// Counterpart of the commented-out `MetaSlot` above: `storage` points to
/// the per-frame metadata, `ref_count` holds the reference count (see the
/// `REF_COUNT_*` sentinels), `vtable_ptr` tags the metadata type, and
/// `in_list` stores the ID of the linked list containing the frame
/// (0 when the frame is in no list).
292#[rustc_has_incoherent_inherent_impls]
293pub struct MetaSlot {
294    pub storage: PPtr<MetaSlotStorage>,
295    pub ref_count: PAtomicU64,
296    pub vtable_ptr: PPtr<usize>,
297    pub in_list: PAtomicU64,
298}
299
300//global layout MetaSlot is size == 64, align == 8;
/// Broadcast axiom: `size_of::<MetaSlot>()` equals `META_SLOT_SIZE()`.
/// Admitted in lieu of the `global layout` declaration above.
301pub broadcast proof fn lemma_meta_slot_size()
302    ensures
303        #[trigger] size_of::<MetaSlot>() == META_SLOT_SIZE(),
304{
305    admit()
306}
307
/// Axiom: a `MetaSlot` is 64 bytes and 8-byte aligned. Admitted (layout
/// cannot currently be proven; see the `global layout` comment above).
308pub proof fn size_of_meta_slot()
309    ensures
310        size_of::<MetaSlot>() == 64,
311        align_of::<MetaSlot>() == 8,
312{
313    admit()
314}
315
/// Exec accessor for `size_of::<MetaSlot>()`, proven (via
/// [`size_of_meta_slot`]) to return exactly 64.
316#[inline(always)]
317#[verifier::allow_in_spec]
318pub const fn meta_slot_size() -> (res: usize)
319    returns
320        64usize,
321{
322    proof {
323        size_of_meta_slot();
324    }
325    size_of::<MetaSlot>()
326}
327
328/// Gets the reference to a metadata slot.
///
/// Validates that `paddr` is page-aligned and below `MAX_PADDR`, then
/// translates it to the slot's address with `frame_to_meta`. Fails with
/// [`GetFrameError::NotAligned`] or [`GetFrameError::OutOfBound`].
329pub fn get_slot(paddr: Paddr, Tracked(owner): Tracked<&MetaSlotOwner>) -> (res: Result<
330    PPtr<MetaSlot>,
331    GetFrameError,
332>)
333    requires
334        owner.self_addr == frame_to_meta(paddr),
335        owner.inv(),
336    ensures
337        res.is_ok() ==> res.unwrap().addr() == frame_to_meta(paddr),
338{
339    if paddr % PAGE_SIZE() != 0 {
340        return Err(GetFrameError::NotAligned);
341    }
342    if paddr >= MAX_PADDR() {
343        return Err(GetFrameError::OutOfBound);
344    }
345    let vaddr = frame_to_meta(paddr);
346    let ptr = PPtr::<MetaSlot>::from_addr(vaddr);

348    // SAFETY: `ptr` points to a valid `MetaSlot` that will never be
349    // mutably borrowed, so taking an immutable reference to it is safe.
350    Ok(ptr)
351}
352
353impl MetaSlot {
354    // These are the axioms for casting meta slots into other things
355    /// This is the equivalent of &self as *const as Vaddr, but we need to axiomatize it.
    ///
    /// Returns `perm.addr()`, the slot's own address in the metadata region.
    /// NOTE(review): the return type is `Paddr` although the value is a
    /// metadata-slot virtual address — TODO confirm the intended type alias.
356    #[rustc_allow_incoherent_impl]
357    #[verifier::external_body]
358    pub fn addr_of(&self, Tracked(perm): Tracked<&vstd::simple_pptr::PointsTo<MetaSlot>>) -> Paddr
359        requires
360            self == perm.value(),
361        returns
362            perm.addr(),
363    {
364        unimplemented!()
365    }
366
    /// Builds a typed `ReprPtr<MetaSlot, T>` viewing this slot at its own
    /// address `addr` (which must equal `perm.addr()`).
367    pub fn cast_slot<T: Repr<MetaSlot>>(
368        &self,
369        addr: usize,
370        Tracked(perm): Tracked<&vstd::simple_pptr::PointsTo<MetaSlot>>,
371    ) -> (res: ReprPtr<MetaSlot, T>)
372        requires
373            perm.value() == self,
374            addr == perm.addr(),
375        ensures
376            res.ptr.addr() == addr,
377            res.addr == addr,
378    {
379        ReprPtr::<MetaSlot, T> { addr: addr, ptr: PPtr::from_addr(addr), _T: PhantomData }
380    }
381
    /// Consumes a raw `MetaSlot` points-to permission and repackages it as
    /// a typed `PointsTo<MetaSlot, T>` at `addr`.
    // NOTE(review): unlike `cast_slot`, there is no precondition tying
    // `addr` to `perm.addr()` — TODO confirm whether one is needed.
382    pub fn cast_perm<T: Repr<MetaSlot>>(
383        addr: usize,
384        Tracked(perm): Tracked<vstd::simple_pptr::PointsTo<MetaSlot>>,
385    ) -> Tracked<PointsTo<MetaSlot, T>> {
386        Tracked(PointsTo { addr: addr, points_to: perm, _T: PhantomData })
387    }
388
389    /// Initializes the metadata slot of a frame assuming it is unused.
390    ///
391    /// If successful, the function returns a pointer to the metadata slot.
392    /// And the slot is initialized with the given metadata.
393    ///
394    /// The resulting reference count held by the returned pointer is
395    /// [`REF_COUNT_UNIQUE`] if `as_unique_ptr` is `true`, otherwise `1`.
396    #[rustc_allow_incoherent_impl]
397    #[verus_spec(
398        with Tracked(regions): Tracked<&mut MetaRegionOwners>
399    )]
400    pub(super) fn get_from_unused<M: AnyFrameMeta>(
401        paddr: Paddr,
402        metadata: M,
403        as_unique_ptr: bool,
404    ) -> (res: Result<PPtr<Self>, GetFrameError>)
405        requires
406            paddr < MAX_PADDR(),
407            paddr % PAGE_SIZE() == 0,
408            old(regions).inv(),
409            old(regions).slots.contains_key(frame_to_index(paddr)),
410            old(regions).slot_owners[frame_to_index(paddr)].usage is Unused,
411            old(regions).slot_owners[frame_to_index(paddr)].in_list.points_to(0),
412            old(regions).slot_owners[frame_to_index(paddr)].self_addr == frame_to_meta(paddr),
413        ensures
414            res matches Ok(res) ==> MetaSlot::get_from_unused_spec::<M>(
415                paddr,
416                metadata,
417                as_unique_ptr,
418                old(regions).view(),
419            ) == (res, regions.view()),
420    {
421        proof {
422            regions.inv_implies_correct_addr(paddr);
423        }

        // Take the slot's tracked ownership and points-to out of the region
        // map for the duration of the initialization.
425        let tracked mut slot_own = regions.slot_owners.tracked_remove(frame_to_index(paddr));
426        let tracked mut slot_perm = regions.slots.tracked_remove(frame_to_index(paddr));

        // NOTE(review): if `get_slot` or the CAS below fails, the early `?`
        // return does not re-insert `slot_own`/`slot_perm` into `regions` —
        // TODO confirm the region invariant is intended to be restored by
        // the caller on error paths.
428        let slot = get_slot(paddr, Tracked(&slot_own))?;

430        // `Acquire` pairs with the `Release` in `drop_last_in_place` and ensures the metadata
431        // initialization won't be reordered before this memory compare-and-exchange.
432        slot.borrow(Tracked(&slot_perm)).ref_count.compare_exchange(
433            Tracked(&mut slot_own.ref_count),
434            REF_COUNT_UNUSED,
435            0,
436        ).map_err(
437            |val|
438                match val {
439                    REF_COUNT_UNIQUE => GetFrameError::Unique,
440                    0 => GetFrameError::Busy,
441                    _ => GetFrameError::InUse,
442                },
443        )?;

445        // SAFETY: The slot now has a reference count of `0`, other threads will
446        // not access the metadata slot so it is safe to have a mutable reference.
447        let contents = slot.take(Tracked(&mut slot_perm));
448        #[verus_spec(with Tracked(&mut slot_own))]
449        contents.write_meta(metadata);
450        slot.put(Tracked(&mut slot_perm), contents);

452        if as_unique_ptr {
453            // No one can create a `Frame` instance directly from the page
454            // address, so `Relaxed` is fine here.
455            slot.borrow(Tracked(&slot_perm)).ref_count.store(
456                Tracked(&mut slot_own.ref_count),
457                REF_COUNT_UNIQUE,
458            );
459        } else {
460            // `Release` is used to ensure that the metadata initialization
461            // won't be reordered after this memory store.
462            slot.borrow(Tracked(&slot_perm)).ref_count.store(Tracked(&mut slot_own.ref_count), 1);
463        }

465        proof {
            // Restore the region maps with the now-initialized slot.
466            slot_own.usage = PageUsage::Frame;
467            regions.slots.tracked_insert(frame_to_index(paddr), slot_perm);
468            regions.slot_owners.tracked_insert(frame_to_index(paddr), slot_own);

470            assert(regions@.slots == old(regions)@.slots.insert(frame_to_index(paddr), slot_own@));

472            // Since storage is uninit currently we
473            // delay the proof here.
474            assume(slot_own@ == MetaSlot::from_unused_slot_spec::<M>(
475                paddr,
476                metadata,
477                as_unique_ptr,
478            ));
479        }

481        Ok(slot)
482    }
483
    /// One attempt of [`Self::get_from_in_use`]: inspects the reference
    /// count and tries a single weak CAS to increment it.
    ///
    /// Returns `Err(GetFrameError::Retry)` on a spurious CAS failure so the
    /// caller can spin and try again.
    // NOTE(review): the tracked `meta_perm`/`slot_own` removed from
    // `regions` below are never re-inserted on any path — TODO confirm the
    // caller is expected to restore the region invariant.
484    #[rustc_allow_incoherent_impl]
485    #[verus_spec(
486        with Tracked(regions): Tracked<&mut MetaRegionOwners>
487    )]
488    pub(super) fn get_from_in_use_loop(slot: PPtr<MetaSlot>) -> Result<PPtr<Self>, GetFrameError>
489        requires
490            old(regions).slots.contains_key(frame_to_index(meta_to_frame(slot.addr()))),
491            old(regions).slot_owners.contains_key(frame_to_index(meta_to_frame(slot.addr()))),
492            old(regions).slots[frame_to_index(meta_to_frame(slot.addr()))].pptr() == slot,
493            old(regions).inv(),
494    {
495        let tracked mut meta_perm = regions.slots.tracked_remove(
496            frame_to_index(meta_to_frame(slot.addr())),
497        );
498        let tracked mut slot_own = regions.slot_owners.tracked_remove(
499            frame_to_index(meta_to_frame(slot.addr())),
500        );

        // Map the sentinel counts to their dedicated errors before trying
        // to take a reference.
502        match slot.borrow(Tracked(&meta_perm)).ref_count.load(Tracked(&mut slot_own.ref_count)) {
503            REF_COUNT_UNUSED => return Err(GetFrameError::Unused),
504            REF_COUNT_UNIQUE => return Err(GetFrameError::Unique),
505            0 => return Err(GetFrameError::Busy),
506            last_ref_cnt => {
507                if last_ref_cnt >= REF_COUNT_MAX {
508                    // See `Self::inc_ref_count` for the explanation.
509                    assert(false)
510                }
511                // Using `Acquire` here to pair with `get_from_unused` or
512                // `<Frame<M> as From<UniqueFrame<M>>>::from` (who must be
513                // performed after writing the metadata).
514                //
515                // It ensures that the written metadata will be visible to us.

517                if slot.borrow(Tracked(&meta_perm)).ref_count.compare_exchange_weak(
518                    Tracked(&mut slot_own.ref_count),
519                    last_ref_cnt,
520                    last_ref_cnt + 1,
521                ).is_ok() {
522                    return Ok(slot);
523                } else {
524                    return Err(GetFrameError::Retry);
525                }
526            },
527        }
528    }
529
530    /// Gets another owning pointer to the metadata slot from the given page.
    ///
    /// On success, the slot's reference count has been incremented and the
    /// slot pointer is returned. Errors: `Unused`, `Unique`, `Busy`, or the
    /// alignment/bound errors from [`get_slot`]. Spins on transient CAS
    /// failures (`GetFrameError::Retry`) from [`Self::get_from_in_use_loop`].
531    #[rustc_allow_incoherent_impl]
532    #[verifier::external_body]
533    #[verus_spec(
534        with Tracked(regions): Tracked<&mut MetaRegionOwners>
535    )]
536    pub(super) fn get_from_in_use(paddr: Paddr) -> Result<PPtr<Self>, GetFrameError> {
        // `slot_owners` is keyed by the frame index, not the raw physical
        // address (see `get_from_unused` / `get_from_in_use_loop`), so the
        // owner must be borrowed at `frame_to_index(paddr)`. Borrowing at
        // `paddr` would fetch the wrong (or a missing) slot owner; this
        // body is `external_body`, so Verus would not have caught it.
537        let slot = get_slot(paddr, Tracked(regions.slot_owners.tracked_borrow(frame_to_index(paddr))))?;

539        // Try to increase the reference count for an in-use frame. Otherwise fail.
540        loop {
541            match Self::get_from_in_use_loop(slot) {
542                Err(GetFrameError::Retry) => {
543                    core::hint::spin_loop();
544                },
545                res => return res,
546            }
547        }
548    }
549
550    /// Increases the frame reference count by one.
551    ///
552    /// # Safety
553    ///
554    /// The caller must have already held a reference to the frame.
555    #[rustc_allow_incoherent_impl]
556    #[verus_spec(
557        with Tracked(rc_perm): Tracked<&mut PermissionU64>
558    )]
559    pub fn inc_ref_count(&self)
560        requires
561            old(rc_perm).is_for(self.ref_count),
562            old(rc_perm).value() != 0,
563            old(rc_perm).value() != REF_COUNT_UNUSED,
564            old(rc_perm).value() < REF_COUNT_MAX,
565    {
566        let last_ref_cnt = self.ref_count.fetch_add(Tracked(rc_perm), 1);

        // The precondition `old(rc_perm).value() < REF_COUNT_MAX` makes this
        // branch unreachable, which is what lets `assert(false)` verify.
568        if last_ref_cnt >= REF_COUNT_MAX {
569            // This follows the same principle as the `Arc::clone` implementation to prevent the
570            // reference count from overflowing. See also
571            // <https://doc.rust-lang.org/std/sync/struct.Arc.html#method.clone>.
572            assert(false);
573        }
574    }
575
576    /// Gets the corresponding frame's physical address.
    ///
    /// Takes the slot's own address (via [`Self::addr_of`]) and translates
    /// it back with `meta_to_frame`; the result is pinned to
    /// `meta_to_frame(perm.addr())` by the `returns` clause.
577    #[rustc_allow_incoherent_impl]
578    #[verus_spec(
579        with Tracked(perm): Tracked<&vstd::simple_pptr::PointsTo<MetaSlot>>
580    )]
581    pub fn frame_paddr(&self) -> (pa: Paddr)
582        requires
583            perm.value() == self,
584            FRAME_METADATA_RANGE().start <= perm.addr() < FRAME_METADATA_RANGE().end,
585            perm.addr() % META_SLOT_SIZE() == 0,
586        returns
587            meta_to_frame(perm.addr()),
588    {
589        let addr = self.addr_of(Tracked(perm));
590        meta_to_frame(addr)
591    }
592
593    /*
594    /// Gets a dynamically typed pointer to the stored metadata.
595    ///
596    /// # Safety
597    ///
598    /// The caller should ensure that:
599    ///  - the stored metadata is initialized (by [`Self::write_meta`]) and valid.
600    ///
601    /// The returned pointer should not be dereferenced as mutable unless having
602    /// exclusive access to the metadata slot.
603    #[rustc_allow_incoherent_impl]
604    #[verifier::external_body]
605    pub(super) unsafe fn dyn_meta_ptr<M: AnyFrameMeta>(&self) -> PPtr<M> {
606        unimplemented!()
607
608        // SAFETY: The page metadata is valid to be borrowed immutably, since
609        // it will never be borrowed mutably after initialization.
610        let vtable_ptr = unsafe { *self.vtable_ptr.get() };
611
612        // SAFETY: The page metadata is initialized and valid.
613        let vtable_ptr = *unsafe { vtable_ptr.assume_init_ref() };
614
615        let meta_ptr: *mut dyn AnyFrameMeta =
616            core::ptr::from_raw_parts_mut(self as *const MetaSlot as *mut MetaSlot, vtable_ptr);
617
618        meta_ptr
619    }*/
620    /// Gets the stored metadata as type `M`.
621    ///
622    /// Calling the method should be safe, but using the returned pointer would
623    /// be unsafe. Specifically, the derefernecer should ensure that:
624    ///  - the stored metadata is initialized (by [`Self::write_meta`]) and
625    ///    valid;
626    ///  - the initialized metadata is of type `M`;
627    ///  - the returned pointer should not be dereferenced as mutable unless
628    ///    having exclusive access to the metadata slot.
    ///
    /// Implemented as `addr_of` followed by `cast_slot`; the returned
    /// `ReprPtr` is located at this slot's own address.
629    #[rustc_allow_incoherent_impl]
630    #[verus_spec(
631        with Tracked(perm): Tracked<&vstd::simple_pptr::PointsTo<MetaSlot>>
632    )]
633    pub fn as_meta_ptr<M: AnyFrameMeta + Repr<MetaSlot>>(&self) -> (res: ReprPtr<MetaSlot, M>)
634        requires
635            self == perm.value(),
636        ensures
637            res.ptr.addr() == perm.addr(),
638            res.addr == perm.addr(),
639    {
640        let addr = self.addr_of(Tracked(perm));

642        self.cast_slot(addr, Tracked(perm))
643    }
644
645    /// Writes the metadata to the slot without reading or dropping the previous value.
646    ///
647    /// # Safety
648    ///
649    /// The caller should have exclusive access to the metadata slot's fields.
    ///
    /// NOTE(review): the runtime body currently does NOT store `metadata`
    /// into `self.storage` — the actual write is commented out below, and
    /// the postconditions are assumed via `#[verifier::external_body]`.
    /// TODO confirm this is a temporary stub.
650    #[rustc_allow_incoherent_impl]
651    #[verifier::external_body]
652    #[verus_spec(
653        with Tracked(slot_own): Tracked<&mut MetaSlotOwner>
654    )]
655    pub fn write_meta<M: AnyFrameMeta + Repr<MetaSlot>>(&self, metadata: M)
656        requires
657    //            old(regions).slots.contains_key()

659            old(slot_own).storage.pptr() == self.storage,
660        ensures
661            slot_own.ref_count == old(slot_own).ref_count,
662            slot_own.vtable_ptr.is_init(),
663            slot_own.vtable_ptr.value() == metadata.vtable_ptr(),
664            slot_own.in_list == old(slot_own).in_list,
665            slot_own.self_addr == old(slot_own).self_addr,
666    {
667        //        const { assert!(size_of::<M>() <= FRAME_METADATA_MAX_SIZE) };
668        //        const { assert!(align_of::<M>() <= FRAME_METADATA_MAX_ALIGN) };
669        // SAFETY: Caller ensures that the access to the fields are exclusive.
670        //        let vtable_ptr = unsafe { &mut *self.vtable_ptr.get() };
671        //        vtable_ptr.write(core::ptr::metadata(&metadata as &dyn AnyFrameMeta));
672        let ptr = &self.storage;

674        // SAFETY:
675        // 1. `ptr` points to the metadata storage.
676        // 2. The size and the alignment of the metadata storage is large enough to hold `M`
677        //    (guaranteed by the const assertions above).
678        // 3. We have exclusive access to the metadata storage (guaranteed by the caller).
679        //        M::cast_to(ptr).put(Tracked(slot_own.storage.borrow_mut()), &metadata);

681    }
682
683    /// Drops the metadata and deallocates the frame.
684    ///
685    /// # Safety
686    ///
687    /// The caller should ensure that:
688    ///  - the reference count is `0` (so we are the sole owner of the frame);
689    ///  - the metadata is initialized;
    ///
    /// Afterwards the reference count is `REF_COUNT_UNUSED`, i.e. the frame
    /// is marked free again.
690    #[rustc_allow_incoherent_impl]
691    #[verus_spec(
692        with Tracked(rc_perm): Tracked<&mut PermissionU64>
693    )]
694    pub(super) unsafe fn drop_last_in_place(&self)
695        requires
696            self.ref_count.id() == old(rc_perm)@.patomic,
697    {
698        // This should be guaranteed as a safety requirement.
699        //        debug_assert_eq!(self.ref_count.load(Tracked(&*rc_perm)), 0);
700        // SAFETY: The caller ensures safety.
701        unsafe { self.drop_meta_in_place() };

703        // `Release` pairs with the `Acquire` in `Frame::from_unused` and ensures
704        // `drop_meta_in_place` won't be reordered after this memory store.
705        self.ref_count.store(Tracked(rc_perm), REF_COUNT_UNUSED);
706    }
707
708    /// Drops the metadata of a slot in place.
709    ///
710    /// After this operation, the metadata becomes uninitialized. Any access to the
711    /// metadata is undefined behavior unless it is re-initialized by [`Self::write_meta`].
712    ///
713    /// # Safety
714    ///
715    /// The caller should ensure that:
716    ///  - the reference count is `0` (so we are the sole owner of the frame);
717    ///  - the metadata is initialized;
    ///
    /// NOTE(review): currently a stub — `unimplemented!()` panics if this is
    /// ever called at runtime. The original implementation is retained in
    /// the trailing comment.
718    #[rustc_allow_incoherent_impl]
719    #[verifier::external_body]
720    pub(super) unsafe fn drop_meta_in_place(&self) {
721        unimplemented!()/*        let paddr = self.frame_paddr();

723        // SAFETY: We have exclusive access to the frame metadata.
724        let vtable_ptr = unsafe { &mut *self.vtable_ptr.get() };
725        // SAFETY: The frame metadata is initialized and valid.
726        let vtable_ptr = unsafe { vtable_ptr.assume_init_read() };

728        let meta_ptr: *mut dyn AnyFrameMeta =
729            core::ptr::from_raw_parts_mut(self.storage.get(), vtable_ptr);

731        // SAFETY: The implementer of the frame metadata decides that if the frame
732        // is safe to be read or not.
733        let mut reader =
734            unsafe { VmReader::from_kernel_space(paddr_to_vaddr(paddr) as *const u8, PAGE_SIZE) };

736        // SAFETY: `ptr` points to the metadata storage which is valid to be mutably borrowed under
737        // `vtable_ptr` because the metadata is valid, the vtable is correct, and we have the exclusive
738        // access to the frame metadata.
739        unsafe {
740            // Invoke the custom `on_drop` handler.
741            (*meta_ptr).on_drop(&mut reader);
742            // Drop the frame metadata.
743            core::ptr::drop_in_place(meta_ptr);
744        }*/

746    }
747}
748
749/// The metadata of frames that holds metadata of frames.
// The `impl_frame_meta_for!(MetaPageMeta)` invocation is currently
// commented out below, so this type has no `AnyFrameMeta` impl yet.
750#[derive(Debug, Default)]
751pub struct MetaPageMeta {}
752
753//impl_frame_meta_for!(MetaPageMeta);
754/*
755/// Initializes the metadata of all physical frames.
756///
757/// The function returns a list of `Frame`s containing the metadata.
758///
759/// # Safety
760///
761/// This function should be called only once and only on the BSP,
762/// before any APs are started.
763pub(crate) unsafe fn init() -> Segment<MetaPageMeta> {
764    let max_paddr = {
765        let regions = &crate::boot::EARLY_INFO.get().unwrap().memory_regions;
766        regions
767            .iter()
768            .filter(|r| r.typ() == MemoryRegionType::Usable)
769            .map(|r| r.base() + r.len())
770            .max()
771            .unwrap()
772    };
773
774    info!(
775        "Initializing frame metadata for physical memory up to {:x}",
776        max_paddr
777    );
778
779    // In RISC-V, the boot page table has mapped the 512GB memory,
780    // so we don't need to add temporary linear mapping.
781    // In LoongArch, the DWM0 has mapped the whole memory,
782    // so we don't need to add temporary linear mapping.
783    #[cfg(target_arch = "x86_64")]
784    add_temp_linear_mapping(max_paddr);
785
786    let tot_nr_frames = max_paddr / page_size::<PagingConsts>(1);
787    let (nr_meta_pages, meta_pages) = alloc_meta_frames(tot_nr_frames);
788
789    // Map the metadata frames.
790    boot_pt::with_borrow(|boot_pt| {
791        for i in 0..nr_meta_pages {
792            let frame_paddr = meta_pages + i * PAGE_SIZE;
793            let vaddr = frame_to_meta::<PagingConsts>(0) + i * PAGE_SIZE;
794            let prop = PageProperty {
795                flags: PageFlags::RW,
796                cache: CachePolicy::Writeback,
797                priv_flags: PrivilegedPageFlags::GLOBAL,
798            };
799            // SAFETY: we are doing the metadata mappings for the kernel.
800            unsafe { boot_pt.map_base_page(vaddr, frame_paddr / PAGE_SIZE, prop) };
801        }
802    })
803    .unwrap();
804
805    // Now the metadata frames are mapped, we can initialize the metadata.
806    super::MAX_PADDR.store(max_paddr, Ordering::Relaxed);
807
808    let meta_page_range = meta_pages..meta_pages + nr_meta_pages * PAGE_SIZE;
809
810    let (range_1, range_2) = allocator::EARLY_ALLOCATOR
811        .lock()
812        .as_ref()
813        .unwrap()
814        .allocated_regions();
815    for r in range_difference(&range_1, &meta_page_range) {
816        let early_seg = Segment::from_unused(r, |_| EarlyAllocatedFrameMeta).unwrap();
817        let _ = ManuallyDrop::new(early_seg);
818    }
819    for r in range_difference(&range_2, &meta_page_range) {
820        let early_seg = Segment::from_unused(r, |_| EarlyAllocatedFrameMeta).unwrap();
821        let _ = ManuallyDrop::new(early_seg);
822    }
823
824    mark_unusable_ranges();
825
826    Segment::from_unused(meta_page_range, |_| MetaPageMeta {}).unwrap()
827}
828
829/// Returns whether the global frame allocator is initialized.
830pub(in crate::mm) fn is_initialized() -> bool {
831    // `init` sets it with relaxed ordering somewhere in the middle. But due
832    // to the safety requirement of the `init` function, we can assume that
833    // there is no race conditions.
834    super::MAX_PADDR.load(Ordering::Relaxed) != 0
835}
836
837fn alloc_meta_frames(tot_nr_frames: usize) -> (usize, Paddr) {
838    let nr_meta_pages = tot_nr_frames
839        .checked_mul(size_of::<MetaSlot>())
840        .unwrap()
841        .div_ceil(PAGE_SIZE);
842    let start_paddr = allocator::early_alloc(
843        Layout::from_size_align(nr_meta_pages * PAGE_SIZE, PAGE_SIZE).unwrap(),
844    )
845    .unwrap();
846
847    let slots = paddr_to_vaddr(start_paddr) as *mut MetaSlot;
848
849    // Initialize the metadata slots.
850    for i in 0..tot_nr_frames {
851        // SAFETY: The memory is successfully allocated with `tot_nr_frames`
852        // slots so the index must be within the range.
853        let slot = unsafe { slots.add(i) };
854        // SAFETY: The memory is just allocated so we have exclusive access and
855        // it's valid for writing.
856        unsafe {
857            slot.write(MetaSlot {
858                storage: UnsafeCell::new(MetaSlotStorage::Empty([0; FRAME_METADATA_MAX_SIZE - 1])),
859                ref_count: AtomicU64::new(REF_COUNT_UNUSED),
860                vtable_ptr: UnsafeCell::new(MaybeUninit::uninit()),
861                in_list: AtomicU64::new(0),
862            })
863        };
864    }
865
866    (nr_meta_pages, start_paddr)
867}
868
869/// Unusable memory metadata. Cannot be used for any purposes.
870#[derive(Debug)]
871pub struct UnusableMemoryMeta;
872impl_frame_meta_for!(UnusableMemoryMeta);
873
874/// Reserved memory metadata. Maybe later used as I/O memory.
875#[derive(Debug)]
876pub struct ReservedMemoryMeta;
877impl_frame_meta_for!(ReservedMemoryMeta);
878
879/// The metadata of physical pages that contains the kernel itself.
880#[derive(Debug, Default)]
881pub struct KernelMeta;
882impl_frame_meta_for!(KernelMeta);
883
884macro_rules! mark_ranges {
885    ($region: expr, $typ: expr) => {{
886        debug_assert!($region.base() % PAGE_SIZE == 0);
887        debug_assert!($region.len() % PAGE_SIZE == 0);
888
889        let seg = Segment::from_unused($region.base()..$region.end(), |_| $typ).unwrap();
890        let _ = ManuallyDrop::new(seg);
891    }};
892}
893
894fn mark_unusable_ranges() {
895    let regions = &crate::boot::EARLY_INFO.get().unwrap().memory_regions;
896
897    for region in regions
898        .iter()
899        .rev()
900        .skip_while(|r| r.typ() != MemoryRegionType::Usable)
901    {
902        match region.typ() {
903            MemoryRegionType::BadMemory => mark_ranges!(region, UnusableMemoryMeta),
904            MemoryRegionType::Unknown => mark_ranges!(region, ReservedMemoryMeta),
905            MemoryRegionType::NonVolatileSleep => mark_ranges!(region, UnusableMemoryMeta),
906            MemoryRegionType::Reserved => mark_ranges!(region, ReservedMemoryMeta),
907            MemoryRegionType::Kernel => mark_ranges!(region, KernelMeta),
908            MemoryRegionType::Module => mark_ranges!(region, UnusableMemoryMeta),
909            MemoryRegionType::Framebuffer => mark_ranges!(region, ReservedMemoryMeta),
910            MemoryRegionType::Reclaimable => mark_ranges!(region, UnusableMemoryMeta),
911            MemoryRegionType::Usable => {} // By default it is initialized as usable.
912        }
913    }
914}
915
916/// Adds a temporary linear mapping for the metadata frames.
917///
918/// We only assume boot page table to contain 4G linear mapping. Thus if the
919/// physical memory is huge we end up depleted of linear virtual memory for
920/// initializing metadata.
921#[cfg(target_arch = "x86_64")]
922fn add_temp_linear_mapping(max_paddr: Paddr) {
923    const PADDR4G: Paddr = 0x1_0000_0000;
924
925    if max_paddr <= PADDR4G {
926        return;
927    }
928
929    // TODO: We don't know if the allocator would allocate from low to high or
930    // not. So we prepare all linear mappings in the boot page table. Hope it
931    // won't drag the boot performance much.
932    let end_paddr = max_paddr.align_up(PAGE_SIZE);
933    let prange = PADDR4G..end_paddr;
934    let prop = PageProperty {
935        flags: PageFlags::RW,
936        cache: CachePolicy::Writeback,
937        priv_flags: PrivilegedPageFlags::GLOBAL,
938    };
939
940    // SAFETY: we are doing the linear mapping for the kernel.
941    unsafe {
942        boot_pt::with_borrow(|boot_pt| {
943            for paddr in prange.step_by(PAGE_SIZE) {
944                let vaddr = LINEAR_MAPPING_BASE_VADDR + paddr;
945                boot_pt.map_base_page(vaddr, paddr / PAGE_SIZE, prop);
946            }
947        })
948        .unwrap();
949    }
950}
951*/
952} // verus!