// ostd/mm/frame/meta.rs
// SPDX-License-Identifier: MPL-2.0
//! Metadata management of frames.
//!
//! You can picture a globally shared, static, gigantic array of metadata
//! initialized for each frame.
//! Each entry in this array holds the metadata for a single frame.
//! There is a dedicated small
//! "heap" space in each slot for dynamic metadata. You can store anything as
//! the metadata of a frame as long as it's [`Sync`].
//!
//! # Implementation
//!
//! The slots are placed in the metadata pages mapped to a certain virtual
//! address in the kernel space. So finding the metadata of a frame often
//! comes with no cost since the translation is a simple arithmetic operation.
16use vstd::prelude::*;
17
18pub mod mapping;
19
20use self::mapping::{frame_to_index, frame_to_meta, meta_addr, meta_to_frame, META_SLOT_SIZE};
21use crate::specs::mm::frame::meta_owners::*;
22use crate::specs::mm::frame::meta_region_owners::MetaRegionOwners;
23
24use vstd::atomic::{PAtomicU64, PAtomicU8, PermissionU64};
25use vstd::cell::pcell_maybe_uninit;
26
27use vstd::simple_pptr::{self, PPtr};
28use vstd_extra::cast_ptr::*;
29use vstd_extra::ownership::*;
30
31use core::{
32 alloc::Layout,
33 any::Any,
34 cell::UnsafeCell,
35 fmt::Debug,
36 marker::PhantomData,
37 mem::{align_of, size_of, ManuallyDrop, MaybeUninit},
38 result::Result,
39 sync::atomic::{AtomicU64, AtomicU8, Ordering},
40};
41
42//use align_ext::AlignExt;
43//use log::info;
44
45use crate::{
46 // boot::memory_region::MemoryRegionType,
47 // const_assert,
48 mm::{
49 // frame::allocator::{self, EarlyAllocatedFrameMeta},
50 paddr_to_vaddr,
51 // page_table::boot_pt,
52 page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
53 /*Infallible,*/ Paddr,
54 PagingLevel,
55 //Segment,
56 Vaddr,
57 MAX_NR_PAGES,
58 MAX_PADDR,
59 /*VmReader,*/ PAGE_SIZE,
60 },
61 specs::arch::kspace::FRAME_METADATA_RANGE,
62 // panic::abort,
63 // util::ops::range_difference,
64};
65
66verus! {
67
/// Per-frame metadata slot, laid out `#[repr(C)]` so that field offsets are
/// stable and a `*const MetaSlot` can be reinterpreted as a metadata pointer.
#[repr(C)]
pub struct MetaSlot {
    /// The metadata of a frame.
    ///
    /// It is placed at the beginning of a slot because:
    /// - the implementation can simply cast a `*const MetaSlot`
    ///   to a `*const AnyFrameMeta` for manipulation;
    /// - if the metadata need special alignment, we can provide
    ///   at most `PAGE_METADATA_ALIGN` bytes of alignment;
    /// - the subsequent fields can utilize the padding of the
    ///   reference count to save space.
    ///
    /// # Verification Design
    /// We model the metadata of the slot as a `MetaSlotStorage`, which is a tagged union of the different
    /// types of metadata defined in the development.
    pub storage: pcell_maybe_uninit::PCell<MetaSlotStorage>,
    /// The reference count of the page.
    ///
    /// Specifically, the reference count has the following meaning:
    /// - `REF_COUNT_UNUSED`: The page is not in use.
    /// - `REF_COUNT_UNIQUE`: The page is owned by a [`UniqueFrame`].
    /// - `0`: The page is being constructed ([`Frame::from_unused`])
    ///   or destructured ([`drop_last_in_place`]).
    /// - `1..REF_COUNT_MAX`: The page is in use.
    /// - `REF_COUNT_MAX..REF_COUNT_UNIQUE`: Illegal values to
    ///   prevent the reference count from overflowing. Otherwise,
    ///   overflowing the reference count will cause soundness issue.
    pub ref_count: PAtomicU64,
    /// The virtual table that indicates the type of the metadata. Currently we do not verify this because
    /// of the dependency on the `dyn Trait` pattern. But we can revisit it now that `dyn Trait` is supported by Verus.
    pub vtable_ptr: PPtr<usize>,
    /// This is only accessed by [`crate::mm::frame::linked_list`].
    /// It stores 0 if the frame is not in any list, otherwise it stores the
    /// ID of the list.
    ///
    /// It is ugly but allows us to tell if a frame is in a specific list by
    /// one relaxed read. Otherwise, if we store it conditionally in `storage`
    /// we would have to ensure that the type is correct before the read, which
    /// costs a synchronization.
    pub in_list: PAtomicU64,
}
109
/// Sentinel reference-count value: the frame is not in use.
pub const REF_COUNT_UNUSED: u64 = u64::MAX;

/// Sentinel reference-count value: the frame is exclusively owned by a
/// [`UniqueFrame`] (see the `ref_count` field docs on [`MetaSlot`]).
pub const REF_COUNT_UNIQUE: u64 = u64::MAX - 1;

/// Upper bound of legal shared reference counts; values in
/// `REF_COUNT_MAX..REF_COUNT_UNIQUE` are illegal and act as an overflow guard.
pub const REF_COUNT_MAX: u64 = i64::MAX as u64;

// Pointer-metadata (vtable) type for `dyn AnyFrameMeta`; currently unused by
// verified code (the vtable is modeled as a plain `usize` in `MetaSlot`).
type FrameMetaVtablePtr = core::ptr::DynMetadata<dyn AnyFrameMeta>;
117
/// The error type for getting the frame from a physical address.
#[derive(Debug)]
pub enum GetFrameError {
    /// The frame is in use.
    InUse,
    /// The frame is not in use.
    Unused,
    /// The frame is being initialized or destructed.
    Busy,
    /// The frame is private to an owner of [`UniqueFrame`].
    ///
    /// [`UniqueFrame`]: super::unique::UniqueFrame
    Unique,
    /// The provided physical address is out of bound.
    OutOfBound,
    /// The provided physical address is not aligned.
    NotAligned,
    /// Verification only: `compare_exchange` returned `Err`, retry.
    /// This variant never escapes the retry loop in `get_from_in_use`.
    Retry,
}
138
/// Spec-level counterpart of [`get_slot`]: maps a frame's physical address to
/// the `PPtr` of its metadata slot via the `frame_to_meta` translation.
pub open spec fn get_slot_spec(paddr: Paddr) -> (res: PPtr<MetaSlot>)
    recommends
        paddr % 4096 == 0,
        paddr < MAX_PADDR,
{
    let slot = frame_to_meta(paddr);
    PPtr(slot, PhantomData::<MetaSlot>)
}
147
/// Space-holder of the AnyFrameMeta virtual table.
///
/// Implementors must be representable inside a [`MetaSlotStorage`]
/// (via the `Repr` bound) so the metadata fits in a slot.
pub trait AnyFrameMeta: Repr<MetaSlotStorage> {
    /// Hook invoked when the last reference to the frame is dropped.
    /// Default: no-op.
    exec fn on_drop(&mut self) {
    }

    /// Whether the frame carries untyped (raw-memory) metadata.
    /// Default: `false`.
    exec fn is_untyped(&self) -> bool {
        false
    }

    /// Ghost view of the vtable pointer associated with this metadata type.
    spec fn vtable_ptr(&self) -> usize;
}
159
//global layout MetaSlot is size == 64, align == 8;
/// Broadcast lemma: `size_of::<MetaSlot>()` equals `META_SLOT_SIZE`.
///
/// Admitted (trusted): it encodes the `global layout` declaration above,
/// which Verus cannot currently express directly here.
pub broadcast proof fn lemma_meta_slot_size()
    ensures
        #[trigger] size_of::<MetaSlot>() == META_SLOT_SIZE,
{
    admit()
}
167
/// Admitted (trusted) layout facts for `MetaSlot`: 64-byte size, 8-byte
/// alignment. Must stay consistent with [`lemma_meta_slot_size`] and the
/// intended `global layout` declaration.
pub proof fn size_of_meta_slot()
    ensures
        size_of::<MetaSlot>() == 64,
        align_of::<MetaSlot>() == 8,
{
    admit()
}
175
/// Executable accessor for the slot size, provable to return `64`.
///
/// The embedded proof invokes [`size_of_meta_slot`] so the `returns` clause
/// follows from the admitted layout facts.
#[inline(always)]
#[verifier::allow_in_spec]
pub const fn meta_slot_size() -> (res: usize)
    returns
        64usize,
{
    proof {
        size_of_meta_slot();
    }
    size_of::<MetaSlot>()
}
187
/// A physical address has a safe metadata slot iff it is page-aligned and
/// below `MAX_PADDR`. This is exactly the success condition of [`get_slot`].
pub open spec fn has_safe_slot(paddr: Paddr) -> bool {
    &&& paddr % PAGE_SIZE == 0
    &&& paddr < MAX_PADDR
}
192
193/// Gets the reference to a metadata slot.
194/// # Verified Properties
195/// ## Preconditions
196/// `paddr` is the physical address of a frame, with a valid owner.
197/// ## Postconditions
198/// If `paddr` is aligned properly and in-bounds, the function returns a pointer to its metadata slot.
199/// ## Safety
200/// Verus ensures that the pointer will only be used when we have a permission object, so creating it is safe.
201pub(super) fn get_slot(paddr: Paddr) -> (res: Result<PPtr<MetaSlot>, GetFrameError>)
202 ensures
203 has_safe_slot(paddr) <==> res is Ok,
204 res is Ok ==> res.unwrap().addr() == frame_to_meta(paddr),
205{
206 if paddr % PAGE_SIZE != 0 {
207 return Err(GetFrameError::NotAligned);
208 }
209 if paddr >= MAX_PADDR {
210 return Err(GetFrameError::OutOfBound);
211 }
212 let vaddr = frame_to_meta(paddr);
213 let ptr = PPtr::<MetaSlot>::from_addr(vaddr);
214
215 // SAFETY: `ptr` points to a valid `MetaSlot` that will never be
216 // mutably borrowed, so taking an immutable reference to it is safe.
217 Ok(ptr)
218}
219
220#[verus_verify]
221impl MetaSlot {
    /// This is the equivalent of &self as *const as Vaddr, but we need to axiomatize it.
    ///
    /// Axiomatized (`external_body`): the `returns` clause ties the result to
    /// the address recorded in the caller-supplied permission, which must
    /// witness this very slot (`self == perm.value()`).
    /// # Safety
    /// It is safe to take the address of a pointer, but it may not be safe to use that
    /// address for all purposes.
    #[verifier::external_body]
    fn addr_of(&self, Tracked(perm): Tracked<&vstd::simple_pptr::PointsTo<MetaSlot>>) -> Paddr
        requires
            self == perm.value(),
        returns
            perm.addr(),
    {
        unimplemented!()
    }
235
    /// A helper function that casts a `MetaSlot` pointer to a `Metadata` pointer of type `M`.
    ///
    /// Purely a pointer reinterpretation: the resulting `ReprPtr` records the
    /// same address `addr`, which the permission proves is this slot's address.
    pub fn cast_slot<M: AnyFrameMeta + Repr<MetaSlotStorage>>(
        &self,
        addr: usize,
        Tracked(perm): Tracked<&vstd::simple_pptr::PointsTo<MetaSlot>>,
    ) -> (res: ReprPtr<MetaSlot, Metadata<M>>)
        requires
            perm.value() == self,
            addr == perm.addr(),
        ensures
            res.ptr.addr() == addr,
            res.addr == addr,
    {
        ReprPtr::<MetaSlot, Metadata<M>> { addr: addr, ptr: PPtr::from_addr(addr), _T: PhantomData }
    }
251
    /// A helper function that casts `MetaSlot` permission to a `Metadata` permission of type `M`.
    ///
    /// Consumes the slot-level `PointsTo` and the per-field inner permissions,
    /// repackaging them (unchanged, per the ensures) into a typed
    /// `PointsTo<MetaSlot, Metadata<M>>` at `addr`.
    pub fn cast_perm<M: AnyFrameMeta + Repr<MetaSlotStorage>>(
        addr: usize,
        Tracked(perm): Tracked<vstd::simple_pptr::PointsTo<MetaSlot>>,
        Tracked(inner_perms): Tracked<MetadataInnerPerms>,
    ) -> (res: Tracked<PointsTo<MetaSlot, Metadata<M>>>)
        ensures
            res@.addr == addr,
            res@.points_to == perm,
            res@.inner_perms == inner_perms,
    {
        Tracked(PointsTo { addr, points_to: perm, inner_perms, _T: PhantomData })
    }
265
    /// Initializes the metadata slot of a frame assuming it is unused.
    ///
    /// If successful, the function returns a pointer to the metadata slot.
    /// And the slot is initialized with the given metadata.
    ///
    /// The resulting reference count held by the returned pointer is
    /// [`REF_COUNT_UNIQUE`] if `as_unique_ptr` is `true`, otherwise `1`.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - **Safety Invariant**: Metaslot region invariants must hold.
    /// - **Bookkeeping**: The slot permissions must be available in order to check the reference count.
    ///   This precondition is stronger than it needs to be; absent permissions correspond to error cases.
    /// ## Postconditions
    /// - **Safety Invariant**: Metaslot region invariants hold after the call.
    /// - **Safety**: Other slots are not affected by the call.
    /// - **Correctness**: If successful, the function returns a pointer to the metadata slot and a permission to the slot.
    /// - **Correctness**: If successful, the slot is initialized with the given metadata.
    /// ## Safety
    /// - This function returns an error if `paddr` does not correspond to a valid slot or the slot is in use.
    /// - Accesses to the slot itself are gated by atomic checks, avoiding data races.
    #[verus_spec(res =>
        with Tracked(regions): Tracked<&mut MetaRegionOwners>
        requires
            old(regions).inv(),
            old(regions).slots.contains_key(frame_to_index(paddr)),
        ensures
            regions.inv(),
            res matches Ok((res, perm)) ==> Self::get_from_unused_perm_spec(paddr, metadata, as_unique_ptr, res, perm@),
            res is Ok ==> Self::get_from_unused_spec(paddr, as_unique_ptr, *old(regions), *regions),
            // If we can make the failure conditions exhaustive, we can add this as a liveness condition.
            !has_safe_slot(paddr) ==> res is Err,
    )]
    pub(super) fn get_from_unused<M: AnyFrameMeta + Repr<MetaSlotStorage> + OwnerOf>(
        paddr: Paddr,
        metadata: M,
        as_unique_ptr: bool,
    ) -> (res: Result<(PPtr<Self>, Tracked<PointsTo<MetaSlot, Metadata<M>>>), GetFrameError>) {
        // `?` propagates NotAligned / OutOfBound before any ghost state moves.
        let slot = get_slot(paddr)?;

        proof {
            // Reaching here implies `get_slot` succeeded, so the slot address
            // derived from the region invariant matches `slot`.
            if has_safe_slot(paddr) {
                regions.inv_implies_correct_addr(paddr);
            }
        }

        // Temporarily extract this slot's owner, slot permission, and the
        // per-field inner permissions; they are reinstated on every exit path.
        let tracked mut slot_own = regions.slot_owners.tracked_remove(frame_to_index(paddr));
        let tracked mut slot_perm = regions.slots.tracked_remove(frame_to_index(paddr));
        let tracked mut inner_perms = slot_own.take_inner_perms();

        // `Acquire` pairs with the `Release` in `drop_last_in_place` and ensures the metadata
        // initialization won't be reordered before this memory compare-and-exchange.
        // UNUSED -> 0 marks the slot as "being constructed".
        let last_ref_cnt = slot.borrow(Tracked(&slot_perm)).ref_count.compare_exchange(
            Tracked(&mut inner_perms.ref_count),
            REF_COUNT_UNUSED,
            0,
        ).map_err(
            |val|
                match val {
                    REF_COUNT_UNIQUE => GetFrameError::Unique,
                    0 => GetFrameError::Busy,
                    _ => GetFrameError::InUse,
                },
        );

        if let Err(err) = last_ref_cnt {
            // CAS failed: restore the ghost state exactly as taken.
            proof {
                slot_own.sync_inner(&inner_perms);
                regions.slot_owners.tracked_insert(frame_to_index(paddr), slot_own);
                regions.slots.tracked_insert(frame_to_index(paddr), slot_perm);
            }

            return Err(err);
        }
        // SAFETY: The slot now has a reference count of `0`, other threads will
        // not access the metadata slot so it is safe to have a mutable reference.

        #[verus_spec(with Tracked(&mut inner_perms.storage), Tracked(&mut inner_perms.vtable_ptr))]
        slot.borrow(Tracked(&slot_perm)).write_meta(metadata);

        if as_unique_ptr {
            // No one can create a `Frame` instance directly from the page
            // address, so `Relaxed` is fine here.
            let mut contents = slot.take(Tracked(&mut slot_perm));
            contents.ref_count.store(Tracked(&mut inner_perms.ref_count), REF_COUNT_UNIQUE);
            slot.put(Tracked(&mut slot_perm), contents);
        } else {
            // `Release` is used to ensure that the metadata initialization
            // won't be reordered after this memory store.
            slot.borrow(Tracked(&slot_perm)).ref_count.store(
                Tracked(&mut inner_perms.ref_count),
                1,
            );
        }

        proof {
            // The slot is now a live frame; `slot_perm` is intentionally NOT
            // reinserted — it is consumed by `cast_perm` below and handed to
            // the caller as part of the typed permission.
            slot_own.usage = PageUsage::Frame;
            slot_own.sync_inner(&inner_perms);
            regions.slot_owners.tracked_insert(frame_to_index(paddr), slot_own);
            assert(regions.inv());
        }

        let perm = MetaSlot::cast_perm::<M>(slot.addr(), Tracked(slot_perm), Tracked(inner_perms));

        Ok((slot, perm))
    }
372
    /// The inner loop of `Self::get_from_in_use`.
    ///
    /// Performs one load + (possibly) one weak CAS attempt; `Retry` signals the
    /// caller to spin and try again.
    /// # Verified Properties
    /// ## Preconditions
    /// - The permission must point to the slot.
    /// - The permission must be initialized.
    /// - **Liveness**: The reference count of the inner permissions must not be at the maximum.
    /// ## Postconditions
    /// - The reference count of the inner permissions is increased by one.
    #[verus_spec(res =>
        with Tracked(perm): Tracked<&vstd::simple_pptr::PointsTo<MetaSlot>>,
            Tracked(inner_perms): Tracked<&mut MetadataInnerPerms>,
        requires
            perm.pptr() == slot,
            perm.is_init(),
            perm.value().ref_count.id() == old(inner_perms).ref_count.id(),
            // In order to not panic, the reference count shouldn't be at the maximum.
            old(inner_perms).ref_count.value() + 1 < REF_COUNT_MAX,
        ensures
            res is Ok ==> inner_perms.ref_count.value() == old(inner_perms).ref_count.value() + 1,
            res is Err ==> inner_perms.ref_count.value() == old(inner_perms).ref_count.value(),
            inner_perms.ref_count.id() == old(inner_perms).ref_count.id(),
            inner_perms.storage == old(inner_perms).storage,
            inner_perms.vtable_ptr == old(inner_perms).vtable_ptr,
            inner_perms.in_list == old(inner_perms).in_list,
    )]
    fn get_from_in_use_loop(slot: PPtr<MetaSlot>) -> Result<PPtr<Self>, GetFrameError> {
        match slot.borrow(Tracked(perm)).ref_count.load(Tracked(&mut inner_perms.ref_count)) {
            REF_COUNT_UNUSED => {
                return Err(GetFrameError::Unused);
            },
            REF_COUNT_UNIQUE => {
                return Err(GetFrameError::Unique);
            },
            0 => {
                return Err(GetFrameError::Busy);
            },
            last_ref_cnt => {
                // Provably unreachable: the precondition bounds the count
                // below REF_COUNT_MAX, so this `assert(false)` verifies.
                if last_ref_cnt >= REF_COUNT_MAX {
                    // See `Self::inc_ref_count` for the explanation.
                    assert(false)
                }
                // Using `Acquire` here to pair with `get_from_unused` or
                // `<Frame<M> as From<UniqueFrame<M>>>::from` (who must be
                // performed after writing the metadata).
                //
                // It ensures that the written metadata will be visible to us.

                if slot.borrow(Tracked(perm)).ref_count.compare_exchange_weak(
                    Tracked(&mut inner_perms.ref_count),
                    last_ref_cnt,
                    last_ref_cnt + 1,
                ).is_ok() {
                    return Ok(slot);
                } else {
                    // Weak CAS may fail spuriously; the caller spins.
                    return Err(GetFrameError::Retry);
                }
            },
        }
    }
432
    /// Gets another owning pointer to the metadata slot from the given page.
    /// # Verified Properties
    /// ## Verification Design
    /// To simplify the verification, we verify the loop body separately from the outer loop. We do not prove termination.
    /// ## Preconditions
    /// - **Safety Invariant**: Metaslot region invariants must hold.
    /// - **Bookkeeping**: The slot permissions must be available in order to check the reference count.
    ///   This precondition is stronger than it needs to be; absent permissions correspond to error cases.
    /// - **Liveness**: The reference count of the inner permissions must not be at the maximum, or the function will panic.
    /// ## Postconditions
    /// - **Safety**: Metaslot region invariants hold after the call.
    /// - **Correctness**: If successful, the slot's reference count is increased by one.
    /// - **Correctness**: If unsuccessful, the metaslot region remains unchanged.
    /// ## Safety
    /// The potential data race is avoided by the spin-lock.
    #[verus_spec(res =>
        with Tracked(regions): Tracked<&mut MetaRegionOwners>
        requires
            old(regions).inv(),
            !Self::get_from_in_use_panic_cond(paddr, *old(regions)),
            old(regions).slots.contains_key(frame_to_index(paddr)),
            old(regions).slot_owners.contains_key(frame_to_index(paddr)),
            old(regions).slots[frame_to_index(paddr)].addr() == frame_to_meta(paddr),
            old(regions).slot_owners[frame_to_index(paddr)].inner_perms.ref_count.id() ==
                old(regions).slots[frame_to_index(paddr)].value().ref_count.id(),
        ensures
            regions.inv(),
            !has_safe_slot(paddr) ==> res is Err,
            res is Ok ==> Self::get_from_in_use_success(paddr, *old(regions), *regions),
            res matches Ok(ptr) ==> ptr == old(regions).slots[frame_to_index(paddr)].pptr(),
            res is Err ==> *regions == *old(regions),
    )]
    #[verifier::exec_allows_no_decreases_clause]
    pub(super) fn get_from_in_use(paddr: Paddr) -> Result<PPtr<Self>, GetFrameError> {
        // Snapshot the region state for use in the loop invariant.
        let ghost regions0 = *regions;

        let slot = get_slot(paddr)?;

        // Extract this slot's ghost resources; reinstated on the exit path.
        let tracked mut slot_own = regions.slot_owners.tracked_remove(frame_to_index(paddr));
        let tracked mut slot_perm = regions.slots.tracked_remove(frame_to_index(paddr));
        let tracked mut inner_perms = slot_own.take_inner_perms();

        let ghost pre = inner_perms.ref_count.value();

        // Try to increase the reference count for an in-use frame. Otherwise fail.
        // No `decreases` clause: termination is not proven (spin loop).
        loop
            invariant
                has_safe_slot(paddr),
                slot_perm.addr() == slot.addr(),
                slot_perm.is_init(),
                slot_perm.value().ref_count.id() == inner_perms.ref_count.id(),
                inner_perms.ref_count.value() == pre,
                inner_perms.ref_count.value() + 1 < REF_COUNT_MAX,
                regions0.slots.contains_key(frame_to_index(paddr)),
                regions0.slot_owners.contains_key(frame_to_index(paddr)),
                regions0.inv(),
                regions0.slots[frame_to_index(paddr)] == slot_perm,
                !Self::get_from_in_use_panic_cond(paddr, *old(regions)),
        {
            match #[verus_spec(with Tracked(&slot_perm), Tracked(&mut inner_perms))]
            Self::get_from_in_use_loop(slot) {
                Err(GetFrameError::Retry) => {
                    // Spurious/contended CAS failure: spin and retry.
                    core::hint::spin_loop();
                },
                res => {
                    proof {
                        // NOTE(review): this `admit()` is a known proof hole —
                        // the postconditions of this exit path are assumed,
                        // not proven. TODO: discharge it.
                        admit();

                        slot_own.sync_inner(&inner_perms);
                        regions.slot_owners.tracked_insert(frame_to_index(paddr), slot_own);
                        regions.slots.tracked_insert(frame_to_index(paddr), slot_perm);
                    }

                    return res;
                },
            }
        }
    }
511
    /// Increases the frame reference count by one.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - **Bookkeeping**: The permission must match the reference count being updated.
    /// - **Liveness**: The function will abort if the reference count is at the maximum.
    /// ## Postconditions
    /// - **Correctness**: The reference count is increased by one.
    /// ## Safety
    /// By requiring the caller to provide a permission for the reference count, we ensure that it already has a reference to the frame.
    #[verus_spec(
        with Tracked(rc_perm): Tracked<&mut PermissionU64>
    )]
    pub(super) fn inc_ref_count(&self)
        requires
            old(rc_perm).is_for(self.ref_count),
            !Self::inc_ref_count_panic_cond(*old(rc_perm)),
        ensures
            rc_perm.value() == old(rc_perm).value() + 1,
    {
        let last_ref_cnt = self.ref_count.fetch_add(Tracked(rc_perm), 1);

        // Provably unreachable under `!inc_ref_count_panic_cond`; the
        // `assert(false)` verifies because the branch cannot be taken.
        if last_ref_cnt >= REF_COUNT_MAX {
            // This follows the same principle as the `Arc::clone` implementation to prevent the
            // reference count from overflowing. See also
            // <https://doc.rust-lang.org/std/sync/struct.Arc.html#method.clone>.
            assert(false);
        }
    }
541
    /// Gets the corresponding frame's physical address.
    ///
    /// Computed by the inverse slot translation: take this slot's address
    /// (via the axiomatized [`Self::addr_of`]) and map it back with
    /// `meta_to_frame`.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - **Safety**: The permission must point to a valid metadata slot.
    /// - **Correctness**: The permission must point to the metadata slot.
    /// ## Postconditions
    /// - **Correctness**: The function returns the physical address of the frame.
    /// ## Safety
    /// The safety precondition requires that the permission points to a valid metadata slot.
    /// This is an internal function, so it is fine to require the caller to verify this.
    #[verus_spec(
        with Tracked(perm): Tracked<&vstd::simple_pptr::PointsTo<MetaSlot>>,
        requires
            perm.value() == self,
            Self::frame_paddr_safety_cond(*perm),
        returns
            meta_to_frame(perm.addr()),
    )]
    pub(super) fn frame_paddr(&self) -> (pa: Paddr) {
        let addr = self.addr_of(Tracked(perm));
        meta_to_frame(addr)
    }
565
566 /*
567 /// Gets a dynamically typed pointer to the stored metadata.
568 ///
569 /// # Safety
570 ///
571 /// The caller should ensure that:
572 /// - the stored metadata is initialized (by [`Self::write_meta`]) and valid.
573 ///
574 /// The returned pointer should not be dereferenced as mutable unless having
575 /// exclusive access to the metadata slot.
576
577 #[verifier::external_body]
578 pub(super) unsafe fn dyn_meta_ptr<M: AnyFrameMeta>(&self) -> PPtr<M> {
579 unimplemented!()
580
581 // SAFETY: The page metadata is valid to be borrowed immutably, since
582 // it will never be borrowed mutably after initialization.
583 let vtable_ptr = unsafe { *self.vtable_ptr.get() };
584
585 // SAFETY: The page metadata is initialized and valid.
586 let vtable_ptr = *unsafe { vtable_ptr.assume_init_ref() };
587
588 let meta_ptr: *mut dyn AnyFrameMeta =
589 core::ptr::from_raw_parts_mut(self as *const MetaSlot as *mut MetaSlot, vtable_ptr);
590
591 meta_ptr
592 }*/
    /// Gets the stored metadata as type `M`.
    ///
    /// Thin composition of [`Self::addr_of`] and [`Self::cast_slot`].
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - **Safety**: The caller must provide an existing permission that matches the contents of the metadata slot.
    /// ## Postconditions
    /// - **Correctness**: The function returns a pointer to the stored metadata, of type `M`.
    /// ## Safety
    /// - Calling the method is always safe, but using the returned pointer could
    ///   be unsafe. Specifically, the dereferencer should ensure that:
    ///   - the stored metadata is initialized (by [`Self::write_meta`]) and valid;
    ///   - the initialized metadata is of type `M` (`Repr<M>::wf`);
    ///   - the returned pointer should not be dereferenced as mutable unless having exclusive access to the metadata slot.
    #[verus_spec(
        with Tracked(perm): Tracked<&vstd::simple_pptr::PointsTo<MetaSlot>>
    )]
    pub(super) fn as_meta_ptr<M: AnyFrameMeta + Repr<MetaSlotStorage>>(&self) -> (res: ReprPtr<
        MetaSlot,
        Metadata<M>,
    >)
        requires
            self == perm.value(),
        ensures
            res.ptr.addr() == perm.addr(),
            res.addr == perm.addr(),
    {
        let addr = self.addr_of(Tracked(perm));
        self.cast_slot(addr, Tracked(perm))
    }
622
    /// Writes the metadata to the slot without reading or dropping the previous value.
    ///
    /// # Verification Design
    /// This function is axiomatized for now because of trait constraints.
    /// The executable body below is a placeholder (the function is
    /// `external_body`); its effect is fully described by the ensures clause.
    /// ## Preconditions
    /// - The caller must provide the permission token to the metadata slot's storage.
    /// ## Postconditions
    /// - The permission is initialized to the new metadata.
    /// ## Safety
    /// The caller must have exclusive access to the metadata slot's storage in order to provide the permission token.
    #[verus_spec(
        with Tracked(meta_perm): Tracked<&mut vstd::cell::pcell_maybe_uninit::PointsTo<MetaSlotStorage>>,
            Tracked(vtable_perm): Tracked<&mut vstd::simple_pptr::PointsTo<usize>>
        requires
            self.storage.id() === old(meta_perm).id(),
            self.vtable_ptr == old(vtable_perm).pptr(),
            old(vtable_perm).is_uninit(),
        ensures
            meta_perm.id() == old(meta_perm).id(),
            meta_perm.is_init(),
            vtable_perm.pptr() == old(vtable_perm).pptr(),
            vtable_perm.is_init(),
            Metadata::<M>::metadata_from_inner_perms(*meta_perm) == metadata,
    )]
    #[verifier::external_body]
    pub(super) fn write_meta<M: AnyFrameMeta + Repr<MetaSlotStorage> + OwnerOf>(
        &self,
        metadata: M,
    ) {
        // const { assert!(size_of::<M>() <= FRAME_METADATA_MAX_SIZE) };
        // const { assert!(align_of::<M>() <= FRAME_METADATA_MAX_ALIGN) };
        // SAFETY: Caller ensures that the access to the fields are exclusive.
        // let vtable_ptr = unsafe { &mut *self.vtable_ptr.get() };
        // vtable_ptr.write(core::ptr::metadata(&metadata as &dyn AnyFrameMeta));
        let ptr = &self.storage;

        // SAFETY:
        // 1. `ptr` points to the metadata storage.
        // 2. The size and the alignment of the metadata storage is large enough to hold `M`
        //    (guaranteed by the const assertions above).
        // 3. We have exclusive access to the metadata storage (guaranteed by the caller).
        //ReprPtr::<MetaSlot, M>::new_borrowed(ptr).put(Tracked(slot_own.storage.borrow_mut()), &metadata);
    }
666
    /// Drops the metadata and deallocates the frame.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// - **Safety Invariant**: The metadata slot must satisfy the safety invariants.
    /// - **Safety**: The caller must provide an owner object for the metadata slot, which must include the permission for the
    ///   slot's `ref_count` field.
    /// - **Safety**: The owner must satisfy [`drop_last_in_place_safety_cond`], which ensures that its reference count is 0
    ///   and it has no dangling raw pointers.
    /// ## Postconditions
    /// - **Safety**: The metadata slot satisfies the safety invariants after the call.
    /// - **Correctness**: The reference count is set to `REF_COUNT_UNUSED` and the contents of the slot are uninitialized.
    /// ## Safety
    /// - By requiring the caller to provide an owner object, we ensure that it already has a reference to the frame.
    /// - The safety precondition ensures that there are no dangling pointers, including raw pointer, guaranteeing temporal safety.
    #[verus_spec(
        with Tracked(owner): Tracked<&mut MetaSlotOwner>
    )]
    pub(super) fn drop_last_in_place(&self)
        requires
            old(owner).inv(),
            self.ref_count.id() == old(owner).inner_perms.ref_count.id(),
            Self::drop_last_in_place_safety_cond(*old(owner)),
        ensures
            owner.inv(),
            owner.inner_perms.ref_count.value() == REF_COUNT_UNUSED,
            owner.inner_perms.ref_count.id() == old(owner).inner_perms.ref_count.id(),
            owner.inner_perms.storage.id() == old(owner).inner_perms.storage.id(),
            owner.inner_perms.storage.is_uninit(),
            owner.inner_perms.vtable_ptr.is_uninit(),
            owner.inner_perms.vtable_ptr.pptr() == old(owner).inner_perms.vtable_ptr.pptr(),
            owner.inner_perms.in_list == old(owner).inner_perms.in_list,
            owner.self_addr == old(owner).self_addr,
            owner.usage == old(owner).usage,
            owner.raw_count == old(owner).raw_count,
            owner.path_if_in_pt == old(owner).path_if_in_pt,
    {
        // This should be guaranteed as a safety requirement.
        // debug_assert_eq!(self.ref_count.load(Tracked(&*rc_perm)), 0);
        // SAFETY: The caller ensures safety.
        // First deinitialize the metadata itself (storage + vtable become uninit).
        #[verus_spec(with Tracked(owner))]
        self.drop_meta_in_place();

        let tracked mut inner_perms = owner.take_inner_perms();

        // `Release` pairs with the `Acquire` in `Frame::from_unused` and ensures
        // `drop_meta_in_place` won't be reordered after this memory store.
        self.ref_count.store(Tracked(&mut inner_perms.ref_count), REF_COUNT_UNUSED);

        proof {
            // Put the inner permissions back into the owner, restoring its invariant.
            owner.sync_inner(&inner_perms);
        }
    }
720
721 /// Drops the metadata of a slot in place.
722 ///
723 /// After this operation, the metadata becomes uninitialized. Any access to the
724 /// metadata is undefined behavior unless it is re-initialized by [`Self::write_meta`].
725 ///
726 /// # Verification Design
727 /// This function is axiomatized because of its reliance on dynamic trait methods and `VmReader`.
728 /// The latter dependency makes it part of the "bootstrap gap".
729 /// Now that Verus better supports the `dyn Trait` pattern and we have verified `VmReader`, we can revisit it.
730 /// ## Preconditions
731 /// - The caller must provide an owner object for the metadata slot.
732 /// - The reference count must be 0
733 /// ## Safety
734 ///
735 /// The caller should ensure that:
736 /// - the reference count is `0` (so we are the sole owner of the frame);
737 /// - the metadata is initialized;
738 #[verifier::external_body]
739 #[verus_spec(
740 with Tracked(slot_own): Tracked<&mut MetaSlotOwner>
741 requires
742 old(slot_own).inner_perms.ref_count.value() == 0 || old(slot_own).inner_perms.ref_count.value() == REF_COUNT_UNIQUE,
743 old(slot_own).inner_perms.storage.is_init(),
744 old(slot_own).inner_perms.in_list.value() == 0,
745 ensures
746 slot_own.inner_perms.ref_count == old(slot_own).inner_perms.ref_count,
747 slot_own.inner_perms.storage.is_uninit(),
748 slot_own.inner_perms.storage.id() == old(slot_own).inner_perms.storage.id(),
749 slot_own.inner_perms.in_list == old(slot_own).inner_perms.in_list,
750 slot_own.inner_perms.vtable_ptr.is_uninit(),
751 slot_own.inner_perms.vtable_ptr.pptr() == old(slot_own).inner_perms.vtable_ptr.pptr(),
752 slot_own.self_addr == old(slot_own).self_addr,
753 slot_own.usage == old(slot_own).usage,
754 slot_own.raw_count == old(slot_own).raw_count,
755 slot_own.path_if_in_pt == old(slot_own).path_if_in_pt,
756 )]
757 #[verifier::external_body]
758 pub(super) fn drop_meta_in_place(&self) {
759 unimplemented!()/* let paddr = self.frame_paddr();
760
761 // SAFETY: We have exclusive access to the frame metadata.
762 let vtable_ptr = unsafe { &mut *self.vtable_ptr.get() };
763 // SAFETY: The frame metadata is initialized and valid.
764 let vtable_ptr = unsafe { vtable_ptr.assume_init_read() };
765
766 let meta_ptr: *mut dyn AnyFrameMeta =
767 core::ptr::from_raw_parts_mut(self.storage.get(), vtable_ptr);
768
769 // SAFETY: The implementer of the frame metadata decides that if the frame
770 // is safe to be read or not.
771 let mut reader =
772 unsafe { VmReader::from_kernel_space(paddr_to_vaddr(paddr) as *const u8, PAGE_SIZE) };
773
774 // SAFETY: `ptr` points to the metadata storage which is valid to be mutably borrowed under
775 // `vtable_ptr` because the metadata is valid, the vtable is correct, and we have the exclusive
776 // access to the frame metadata.
777 unsafe {
778 // Invoke the custom `on_drop` handler.
779 (*meta_ptr).on_drop(&mut reader);
780 // Drop the frame metadata.
781 core::ptr::drop_in_place(meta_ptr);
782 }*/
783
784 }
785}
786
/// The metadata of frames that holds metadata of frames.
///
/// Zero-sized marker: metadata pages need no per-frame payload of their own.
#[derive(Debug, Default)]
pub struct MetaPageMeta {}
790
791//impl_frame_meta_for!(MetaPageMeta);
792/*
793/// Initializes the metadata of all physical frames.
794///
795/// The function returns a list of `Frame`s containing the metadata.
796///
797/// # Safety
798///
799/// This function should be called only once and only on the BSP,
800/// before any APs are started.
801pub(crate) unsafe fn init() -> Segment<MetaPageMeta> {
802 let max_paddr = {
803 let regions = &crate::boot::EARLY_INFO.get().unwrap().memory_regions;
804 regions
805 .iter()
806 .filter(|r| r.typ() == MemoryRegionType::Usable)
807 .map(|r| r.base() + r.len())
808 .max()
809 .unwrap()
810 };
811
812 info!(
813 "Initializing frame metadata for physical memory up to {:x}",
814 max_paddr
815 );
816
817 // In RISC-V, the boot page table has mapped the 512GB memory,
818 // so we don't need to add temporary linear mapping.
819 // In LoongArch, the DWM0 has mapped the whole memory,
820 // so we don't need to add temporary linear mapping.
821 #[cfg(target_arch = "x86_64")]
822 add_temp_linear_mapping(max_paddr);
823
824 let tot_nr_frames = max_paddr / page_size::<PagingConsts>(1);
825 let (nr_meta_pages, meta_pages) = alloc_meta_frames(tot_nr_frames);
826
827 // Map the metadata frames.
828 boot_pt::with_borrow(|boot_pt| {
829 for i in 0..nr_meta_pages {
830 let frame_paddr = meta_pages + i * PAGE_SIZE;
831 let vaddr = frame_to_meta::<PagingConsts>(0) + i * PAGE_SIZE;
832 let prop = PageProperty {
833 flags: PageFlags::RW,
834 cache: CachePolicy::Writeback,
835 priv_flags: PrivilegedPageFlags::GLOBAL,
836 };
837 // SAFETY: we are doing the metadata mappings for the kernel.
838 unsafe { boot_pt.map_base_page(vaddr, frame_paddr / PAGE_SIZE, prop) };
839 }
840 })
841 .unwrap();
842
843 // Now the metadata frames are mapped, we can initialize the metadata.
844 super::MAX_PADDR.store(max_paddr, Ordering::Relaxed);
845
846 let meta_page_range = meta_pages..meta_pages + nr_meta_pages * PAGE_SIZE;
847
848 let (range_1, range_2) = allocator::EARLY_ALLOCATOR
849 .lock()
850 .as_ref()
851 .unwrap()
852 .allocated_regions();
853 for r in range_difference(&range_1, &meta_page_range) {
854 let early_seg = Segment::from_unused(r, |_| EarlyAllocatedFrameMeta).unwrap();
855 let _ = ManuallyDrop::new(early_seg);
856 }
857 for r in range_difference(&range_2, &meta_page_range) {
858 let early_seg = Segment::from_unused(r, |_| EarlyAllocatedFrameMeta).unwrap();
859 let _ = ManuallyDrop::new(early_seg);
860 }
861
862 mark_unusable_ranges();
863
864 Segment::from_unused(meta_page_range, |_| MetaPageMeta {}).unwrap()
865}
866
867/// Returns whether the global frame allocator is initialized.
868pub(in crate::mm) fn is_initialized() -> bool {
    // `init` sets it with relaxed ordering somewhere in the middle. But due
    // to the safety requirement of the `init` function, we can assume that
    // there are no race conditions.
872 super::MAX_PADDR.load(Ordering::Relaxed) != 0
873}
874
875fn alloc_meta_frames(tot_nr_frames: usize) -> (usize, Paddr) {
876 let nr_meta_pages = tot_nr_frames
877 .checked_mul(size_of::<MetaSlot>())
878 .unwrap()
879 .div_ceil(PAGE_SIZE);
880 let start_paddr = allocator::early_alloc(
881 Layout::from_size_align(nr_meta_pages * PAGE_SIZE, PAGE_SIZE).unwrap(),
882 )
883 .unwrap();
884
885 let slots = paddr_to_vaddr(start_paddr) as *mut MetaSlot;
886
887 // Initialize the metadata slots.
888 for i in 0..tot_nr_frames {
889 // SAFETY: The memory is successfully allocated with `tot_nr_frames`
890 // slots so the index must be within the range.
891 let slot = unsafe { slots.add(i) };
892 // SAFETY: The memory is just allocated so we have exclusive access and
893 // it's valid for writing.
894 unsafe {
895 slot.write(MetaSlot {
896 storage: UnsafeCell::new(MetaSlotStorage::Empty([0; FRAME_METADATA_MAX_SIZE - 1])),
897 ref_count: AtomicU64::new(REF_COUNT_UNUSED),
898 vtable_ptr: UnsafeCell::new(MaybeUninit::uninit()),
899 in_list: AtomicU64::new(0),
900 })
901 };
902 }
903
904 (nr_meta_pages, start_paddr)
905}
906
907/// Unusable memory metadata. Cannot be used for any purposes.
908#[derive(Debug)]
909pub struct UnusableMemoryMeta;
910impl_frame_meta_for!(UnusableMemoryMeta);
911
912/// Reserved memory metadata. Maybe later used as I/O memory.
913#[derive(Debug)]
914pub struct ReservedMemoryMeta;
915impl_frame_meta_for!(ReservedMemoryMeta);
916
917/// The metadata of physical pages that contains the kernel itself.
918#[derive(Debug, Default)]
919pub struct KernelMeta;
920impl_frame_meta_for!(KernelMeta);
921
922macro_rules! mark_ranges {
923 ($region: expr, $typ: expr) => {{
924 debug_assert!($region.base() % PAGE_SIZE == 0);
925 debug_assert!($region.len() % PAGE_SIZE == 0);
926
927 let seg = Segment::from_unused($region.base()..$region.end(), |_| $typ).unwrap();
928 let _ = ManuallyDrop::new(seg);
929 }};
930}
931
932fn mark_unusable_ranges() {
933 let regions = &crate::boot::EARLY_INFO.get().unwrap().memory_regions;
934
935 for region in regions
936 .iter()
937 .rev()
938 .skip_while(|r| r.typ() != MemoryRegionType::Usable)
939 {
940 match region.typ() {
941 MemoryRegionType::BadMemory => mark_ranges!(region, UnusableMemoryMeta),
942 MemoryRegionType::Unknown => mark_ranges!(region, ReservedMemoryMeta),
943 MemoryRegionType::NonVolatileSleep => mark_ranges!(region, UnusableMemoryMeta),
944 MemoryRegionType::Reserved => mark_ranges!(region, ReservedMemoryMeta),
945 MemoryRegionType::Kernel => mark_ranges!(region, KernelMeta),
946 MemoryRegionType::Module => mark_ranges!(region, UnusableMemoryMeta),
947 MemoryRegionType::Framebuffer => mark_ranges!(region, ReservedMemoryMeta),
948 MemoryRegionType::Reclaimable => mark_ranges!(region, UnusableMemoryMeta),
949 MemoryRegionType::Usable => {} // By default it is initialized as usable.
950 }
951 }
952}
953
954/// Adds a temporary linear mapping for the metadata frames.
955///
/// We assume the boot page table only contains a 4 GiB linear mapping. Thus,
/// if the physical memory is huge, we would run out of linear virtual memory
/// while initializing the metadata.
959#[cfg(target_arch = "x86_64")]
960fn add_temp_linear_mapping(max_paddr: Paddr) {
961 const PADDR4G: Paddr = 0x1_0000_0000;
962
963 if max_paddr <= PADDR4G {
964 return;
965 }
966
967 // TODO: We don't know if the allocator would allocate from low to high or
968 // not. So we prepare all linear mappings in the boot page table. Hope it
969 // won't drag the boot performance much.
970 let end_paddr = max_paddr.align_up(PAGE_SIZE);
971 let prange = PADDR4G..end_paddr;
972 let prop = PageProperty {
973 flags: PageFlags::RW,
974 cache: CachePolicy::Writeback,
975 priv_flags: PrivilegedPageFlags::GLOBAL,
976 };
977
978 // SAFETY: we are doing the linear mapping for the kernel.
979 unsafe {
980 boot_pt::with_borrow(|boot_pt| {
981 for paddr in prange.step_by(PAGE_SIZE) {
982 let vaddr = LINEAR_MAPPING_BASE_VADDR + paddr;
983 boot_pt.map_base_page(vaddr, paddr / PAGE_SIZE, prop);
984 }
985 })
986 .unwrap();
987 }
988}
989*/
990} // verus!