ostd/mm/frame/meta.rs
1// SPDX-License-Identifier: MPL-2.0
2
3//! Metadata management of frames.
4//!
5//! You can picture a globally shared, static, gigantic array of metadata
6//! initialized for each frame.
7//! Each entry in this array holds the metadata for a single frame.
8//! There would be a dedicated small
9//! "heap" space in each slot for dynamic metadata. You can store anything as
10//! the metadata of a frame as long as it's [`Sync`].
11//!
12//! # Implementation
13//!
//! The slots are placed in the metadata pages mapped to a certain virtual
//! address in the kernel space, so finding the metadata of a frame comes at
//! almost no cost: the translation is a simple arithmetic operation.
17
pub(crate) mod mapping {
    //! The metadata of each physical page is linear mapped to fixed virtual addresses
    //! in [`FRAME_METADATA_RANGE`].

    use super::MetaSlot;
    use crate::mm::{PAGE_SIZE, Paddr, PagingConstsTrait, Vaddr, kspace::FRAME_METADATA_RANGE};

    /// Converts a physical address of a base frame to the virtual address of the metadata slot.
    ///
    /// The mapping is purely arithmetic: the frame at physical address
    /// `i * PAGE_SIZE` has its slot at
    /// `FRAME_METADATA_RANGE.start + i * size_of::<MetaSlot>()`. A `paddr`
    /// that is not page-aligned is silently rounded down to its containing
    /// frame by the integer division.
    //
    // NOTE(review): the generic `C` is unused by the arithmetic here;
    // presumably it exists so callers state which paging constants the
    // mapping assumes — confirm before removing it.
    pub(crate) const fn frame_to_meta<C: PagingConstsTrait>(paddr: Paddr) -> Vaddr {
        let base = FRAME_METADATA_RANGE.start;
        // The index of the frame, i.e., the index of its slot.
        let offset = paddr / PAGE_SIZE;
        base + offset * size_of::<MetaSlot>()
    }

    /// Converts a virtual address of the metadata slot to the physical address of the frame.
    ///
    /// This is the inverse of [`frame_to_meta`] for slot-aligned virtual
    /// addresses within [`FRAME_METADATA_RANGE`].
    pub(crate) const fn meta_to_frame<C: PagingConstsTrait>(vaddr: Vaddr) -> Paddr {
        let base = FRAME_METADATA_RANGE.start;
        // The index of the slot, i.e., the index of its frame.
        let offset = (vaddr - base) / size_of::<MetaSlot>();
        offset * PAGE_SIZE
    }
}
39
40use core::{
41 alloc::Layout,
42 any::Any,
43 cell::UnsafeCell,
44 fmt::Debug,
45 mem::{ManuallyDrop, MaybeUninit, size_of},
46 result::Result,
47 sync::atomic::{AtomicU64, Ordering},
48};
49
50use crate::{
51 arch::mm::PagingConsts,
52 boot::memory_region::MemoryRegionType,
53 const_assert, info,
54 mm::{
55 CachePolicy, Infallible, PAGE_SIZE, Paddr, PageFlags, PageProperty, PrivilegedPageFlags,
56 Segment, Vaddr, VmReader,
57 frame::allocator::{self, EarlyAllocatedFrameMeta},
58 paddr_to_vaddr, page_size,
59 page_table::boot_pt,
60 },
61 panic::abort,
62 util::ops::range_difference,
63};
64
/// The maximum number of bytes of the metadata of a frame.
///
/// This is the slot size minus the space occupied by the bookkeeping fields
/// of [`MetaSlot`]: `ref_count`, `vtable_ptr`, and `in_list`.
pub const FRAME_METADATA_MAX_SIZE: usize = META_SLOT_SIZE
    - size_of::<AtomicU64>()
    - size_of::<FrameMetaVtablePtr>()
    - size_of::<AtomicU64>();
/// The maximum alignment in bytes of the metadata of a frame.
pub const FRAME_METADATA_MAX_ALIGN: usize = META_SLOT_SIZE;

// The size of one metadata slot. It must divide `PAGE_SIZE` and equal
// `size_of::<MetaSlot>()`; both are enforced by `const_assert!`s below.
const META_SLOT_SIZE: usize = 64;
74
/// One entry of the global frame metadata table.
///
/// One `MetaSlot` exists per physical base frame; see [`mapping`] for how a
/// frame's physical address maps to its slot. `#[repr(C)]` guarantees that
/// `storage` sits at offset 0 (see the field docs for why that matters).
#[repr(C)]
pub(in crate::mm) struct MetaSlot {
    /// The metadata of a frame.
    ///
    /// It is placed at the beginning of a slot because:
    /// - the implementation can simply cast a `*const MetaSlot`
    ///   to a `*const AnyFrameMeta` for manipulation;
    /// - if the metadata need special alignment, we can provide
    ///   at most [`FRAME_METADATA_MAX_ALIGN`] bytes of alignment;
    /// - the subsequent fields can utilize the padding of the
    ///   reference count to save space.
    ///
    /// Don't interpret this field as an array of bytes. It is a
    /// placeholder for the metadata of a frame.
    storage: UnsafeCell<[u8; FRAME_METADATA_MAX_SIZE]>,
    /// The reference count of the page.
    ///
    /// Specifically, the reference count has the following meaning:
    /// - `REF_COUNT_UNUSED`: The page is not in use.
    /// - `REF_COUNT_UNIQUE`: The page is owned by a [`UniqueFrame`].
    /// - `0`: The page is being constructed ([`Frame::from_unused`])
    ///   or destructured ([`drop_last_in_place`]).
    /// - `1..REF_COUNT_MAX`: The page is in use.
    /// - `REF_COUNT_MAX..REF_COUNT_UNIQUE`: Illegal values to
    ///   prevent the reference count from overflowing. Otherwise,
    ///   overflowing the reference count will cause soundness issue.
    ///
    /// [`Frame::from_unused`]: super::Frame::from_unused
    /// [`UniqueFrame`]: super::unique::UniqueFrame
    /// [`drop_last_in_place`]: Self::drop_last_in_place
    //
    // Other than this field the fields should be `MaybeUninit`.
    // See initialization in `alloc_meta_frames`.
    pub(super) ref_count: AtomicU64,
    /// The virtual table that indicates the type of the metadata.
    pub(super) vtable_ptr: UnsafeCell<MaybeUninit<FrameMetaVtablePtr>>,
    /// This is only accessed by [`crate::mm::frame::linked_list`].
    /// It stores 0 if the frame is not in any list, otherwise it stores the
    /// ID of the list.
    ///
    /// It is ugly but allows us to tell if a frame is in a specific list by
    /// one relaxed read. Otherwise, if we store it conditionally in `storage`
    /// we would have to ensure that the type is correct before the read, which
    /// costs a synchronization.
    pub(super) in_list: AtomicU64,
}
121
// Sentinel values for `MetaSlot::ref_count`; see the field documentation on
// `MetaSlot::ref_count` for the full state machine.
pub(super) const REF_COUNT_UNUSED: u64 = u64::MAX;
pub(super) const REF_COUNT_UNIQUE: u64 = u64::MAX - 1;
pub(super) const REF_COUNT_MAX: u64 = i64::MAX as u64;

/// The pointer metadata (vtable pointer) of a `dyn AnyFrameMeta` trait object.
type FrameMetaVtablePtr = core::ptr::DynMetadata<dyn AnyFrameMeta>;

// Slots must tile pages exactly, and `MetaSlot` must occupy exactly one slot.
const_assert!(PAGE_SIZE.is_multiple_of(META_SLOT_SIZE));
const_assert!(size_of::<MetaSlot>() == META_SLOT_SIZE);
130
/// All frame metadata types must implement this trait.
///
/// If a frame type needs specific drop behavior, it should specify
/// when implementing this trait. When we drop the last handle to
/// this frame, the `on_drop` method will be called. The `on_drop`
/// method is called with the physical address of the frame.
///
/// The implemented structure should have a size less than or equal to
/// [`FRAME_METADATA_MAX_SIZE`] and an alignment less than or equal to
/// [`FRAME_METADATA_MAX_ALIGN`]. Otherwise, the metadata type cannot
/// be used because storing it will fail compile-time assertions.
///
/// # Safety
///
/// If `on_drop` reads the page using the provided `VmReader`, the
/// implementer must ensure that the frame is safe to read.
pub unsafe trait AnyFrameMeta: Any + Send + Sync {
    /// Called when the last handle to the frame is dropped.
    ///
    /// The default implementation does nothing.
    fn on_drop(&mut self, _reader: &mut VmReader<Infallible>) {}

    /// Whether the metadata's associated frame is untyped.
    ///
    /// If a type implements [`AnyUFrameMeta`], this should be `true`.
    /// Otherwise, it should be `false`.
    ///
    /// [`AnyUFrameMeta`]: super::untyped::AnyUFrameMeta
    fn is_untyped(&self) -> bool {
        false
    }
}
161
/// Checks that a frame metadata type has valid size and alignment.
///
/// The assertions are expanded at the call site, so a type exceeding the
/// [`FRAME_METADATA_MAX_SIZE`]/[`FRAME_METADATA_MAX_ALIGN`] limits fails to
/// compile wherever this macro is invoked for it.
#[macro_export]
macro_rules! check_frame_meta_layout {
    ($t:ty) => {
        $crate::const_assert!(size_of::<$t>() <= $crate::mm::frame::meta::FRAME_METADATA_MAX_SIZE);
        $crate::const_assert!(
            $crate::mm::frame::meta::FRAME_METADATA_MAX_ALIGN % align_of::<$t>() == 0
        );
    };
}
172
/// Makes a structure usable as a frame metadata.
///
/// The expansion also checks the type's layout against the slot limits via
/// `check_frame_meta_layout!`.
#[macro_export]
macro_rules! impl_frame_meta_for {
    // Implement without specifying the drop behavior. The default (no-op)
    // `on_drop` is kept, which is what makes the `unsafe impl` sound.
    ($t:ty) => {
        // SAFETY: `on_drop` won't read the page.
        unsafe impl $crate::mm::frame::meta::AnyFrameMeta for $t {}

        $crate::check_frame_meta_layout!($t);
    };
}

pub use impl_frame_meta_for;
186
/// The error type for getting the frame from a physical address.
///
/// Produced by [`get_slot`] (alignment/bound errors) and by the
/// `MetaSlot::get_from_*` functions (reference-count state errors).
#[derive(Debug)]
pub enum GetFrameError {
    /// The frame is in use.
    InUse,
    /// The frame is not in use.
    Unused,
    /// The frame is being initialized or destructed.
    Busy,
    /// The frame is private to an owner of [`UniqueFrame`].
    ///
    /// [`UniqueFrame`]: super::unique::UniqueFrame
    Unique,
    /// The provided physical address is out of bound.
    OutOfBound,
    /// The provided physical address is not aligned.
    NotAligned,
}
205
206/// Gets the reference to a metadata slot.
207pub(super) fn get_slot(paddr: Paddr) -> Result<&'static MetaSlot, GetFrameError> {
208 if !paddr.is_multiple_of(PAGE_SIZE) {
209 return Err(GetFrameError::NotAligned);
210 }
211 if paddr >= super::max_paddr() {
212 return Err(GetFrameError::OutOfBound);
213 }
214
215 let vaddr = mapping::frame_to_meta::<PagingConsts>(paddr);
216 let ptr = vaddr as *mut MetaSlot;
217
218 // SAFETY: `ptr` points to a valid `MetaSlot` that will never be
219 // mutably borrowed, so taking an immutable reference to it is safe.
220 Ok(unsafe { &*ptr })
221}
222
impl MetaSlot {
    /// Initializes the metadata slot of a frame assuming it is unused.
    ///
    /// If successful, the function returns a pointer to the metadata slot.
    /// And the slot is initialized with the given metadata.
    ///
    /// The resulting reference count held by the returned pointer is
    /// [`REF_COUNT_UNIQUE`] if `as_unique_ptr` is `true`, otherwise `1`.
    pub(super) fn get_from_unused<M: AnyFrameMeta>(
        paddr: Paddr,
        metadata: M,
        as_unique_ptr: bool,
    ) -> Result<*const Self, GetFrameError> {
        let slot = get_slot(paddr)?;

        // Transition `REF_COUNT_UNUSED` -> `0`, i.e., from "unused" to
        // "being constructed". On failure the observed value tells us who
        // else holds the frame.
        //
        // `Acquire` pairs with the `Release` in `drop_last_in_place` and ensures the metadata
        // initialization won't be reordered before this memory compare-and-exchange.
        slot.ref_count
            .compare_exchange(REF_COUNT_UNUSED, 0, Ordering::Acquire, Ordering::Relaxed)
            .map_err(|val| match val {
                REF_COUNT_UNIQUE => GetFrameError::Unique,
                0 => GetFrameError::Busy,
                _ => GetFrameError::InUse,
            })?;

        // SAFETY: The slot now has a reference count of `0`, other threads will
        // not access the metadata slot so it is safe to have a mutable reference.
        unsafe { slot.write_meta(metadata) };

        if as_unique_ptr {
            // No one can create a `Frame` instance directly from the page
            // address, so `Relaxed` is fine here.
            slot.ref_count.store(REF_COUNT_UNIQUE, Ordering::Relaxed);
        } else {
            // `Release` is used to ensure that the metadata initialization
            // won't be reordered after this memory store.
            slot.ref_count.store(1, Ordering::Release);
        }

        Ok(slot as *const MetaSlot)
    }

    /// Gets another owning pointer to the metadata slot from the given page.
    pub(super) fn get_from_in_use(paddr: Paddr) -> Result<*const Self, GetFrameError> {
        let slot = get_slot(paddr)?;

        // Try to increase the reference count for an in-use frame. Otherwise fail.
        loop {
            match slot.ref_count.load(Ordering::Relaxed) {
                REF_COUNT_UNUSED => return Err(GetFrameError::Unused),
                REF_COUNT_UNIQUE => return Err(GetFrameError::Unique),
                0 => return Err(GetFrameError::Busy),
                last_ref_cnt => {
                    if last_ref_cnt >= REF_COUNT_MAX {
                        // See `Self::inc_ref_count` for the explanation.
                        abort();
                    }
                    // Using `Acquire` here to pair with `get_from_unused` or
                    // `<Frame<M> as From<UniqueFrame<M>>>::from` (who must be
                    // performed after writing the metadata).
                    //
                    // It ensures that the written metadata will be visible to us.
                    if slot
                        .ref_count
                        .compare_exchange_weak(
                            last_ref_cnt,
                            last_ref_cnt + 1,
                            Ordering::Acquire,
                            Ordering::Relaxed,
                        )
                        .is_ok()
                    {
                        return Ok(slot as *const MetaSlot);
                    }
                }
            }
            // The CAS failed (spuriously or due to a concurrent update);
            // back off briefly before re-reading the count.
            core::hint::spin_loop();
        }
    }

    /// Increases the frame reference count by one.
    ///
    /// # Safety
    ///
    /// The caller must have already held a reference to the frame.
    pub(super) unsafe fn inc_ref_count(&self) {
        let last_ref_cnt = self.ref_count.fetch_add(1, Ordering::Relaxed);
        // The safety requirement rules out the "being constructed" (`0`) and
        // "unused" states; violating it is a bug in the caller.
        debug_assert!(last_ref_cnt != 0 && last_ref_cnt != REF_COUNT_UNUSED);

        if last_ref_cnt >= REF_COUNT_MAX {
            // This follows the same principle as the `Arc::clone` implementation to prevent the
            // reference count from overflowing. See also
            // <https://doc.rust-lang.org/std/sync/struct.Arc.html#method.clone>.
            abort();
        }
    }

    /// Gets the corresponding frame's physical address.
    pub(super) fn frame_paddr(&self) -> Paddr {
        // The slot's own virtual address determines the frame it describes.
        mapping::meta_to_frame::<PagingConsts>(self as *const MetaSlot as Vaddr)
    }

    /// Gets a dynamically typed pointer to the stored metadata.
    ///
    /// # Safety
    ///
    /// The caller should ensure that:
    /// - the stored metadata is initialized (by [`Self::write_meta`]) and valid.
    ///
    /// The returned pointer should not be dereferenced as mutable unless having
    /// exclusive access to the metadata slot.
    pub(super) unsafe fn dyn_meta_ptr(&self) -> *mut dyn AnyFrameMeta {
        // SAFETY: The page metadata is valid to be borrowed immutably, since
        // it will never be borrowed mutably after initialization.
        let vtable_ptr = unsafe { *self.vtable_ptr.get() };

        // SAFETY: The page metadata is initialized and valid.
        let vtable_ptr = *unsafe { vtable_ptr.assume_init_ref() };

        // `storage` is at offset 0 (`#[repr(C)]`), so the slot address is
        // also the metadata address; attach the vtable to form a fat pointer.
        let meta_ptr: *mut dyn AnyFrameMeta =
            core::ptr::from_raw_parts_mut(self as *const MetaSlot as *mut MetaSlot, vtable_ptr);

        meta_ptr
    }

    /// Gets the stored metadata as type `M`.
    ///
    /// Calling the method should be safe, but using the returned pointer would
    /// be unsafe. Specifically, the dereferencer should ensure that:
    /// - the stored metadata is initialized (by [`Self::write_meta`]) and
    ///   valid;
    /// - the initialized metadata is of type `M`;
    /// - the returned pointer should not be dereferenced as mutable unless
    ///   having exclusive access to the metadata slot.
    pub(super) fn as_meta_ptr<M: AnyFrameMeta>(&self) -> *mut M {
        self.storage.get() as *mut M
    }

    /// Writes the metadata to the slot without reading or dropping the previous value.
    ///
    /// # Safety
    ///
    /// The caller should have exclusive access to the metadata slot's fields.
    pub(super) unsafe fn write_meta<M: AnyFrameMeta>(&self, metadata: M) {
        // These fail compilation (per instantiated `M`) if `M` does not fit
        // into the slot; see `check_frame_meta_layout!`.
        const { assert!(size_of::<M>() <= FRAME_METADATA_MAX_SIZE) };
        const { assert!(align_of::<M>() <= FRAME_METADATA_MAX_ALIGN) };

        // SAFETY: Caller ensures that the access to the fields are exclusive.
        let vtable_ptr = unsafe { &mut *self.vtable_ptr.get() };
        vtable_ptr.write(core::ptr::metadata(&metadata as &dyn AnyFrameMeta));

        let ptr = self.storage.get();
        // SAFETY:
        // 1. `ptr` points to the metadata storage.
        // 2. The size and the alignment of the metadata storage is large enough to hold `M`
        //    (guaranteed by the const assertions above).
        // 3. We have exclusive access to the metadata storage (guaranteed by the caller).
        unsafe { ptr.cast::<M>().write(metadata) };
    }

    /// Drops the metadata and deallocates the frame.
    ///
    /// # Safety
    ///
    /// The caller should ensure that:
    /// - the reference count is `0` (so we are the sole owner of the frame);
    /// - the metadata is initialized;
    pub(super) unsafe fn drop_last_in_place(&self) {
        // This should be guaranteed as a safety requirement.
        debug_assert_eq!(self.ref_count.load(Ordering::Relaxed), 0);

        // SAFETY: The caller ensures safety.
        unsafe { self.drop_meta_in_place() };

        // `Release` pairs with the `Acquire` in `Frame::from_unused` and ensures
        // `drop_meta_in_place` won't be reordered after this memory store.
        self.ref_count.store(REF_COUNT_UNUSED, Ordering::Release);
    }

    /// Drops the metadata of a slot in place.
    ///
    /// After this operation, the metadata becomes uninitialized. Any access to the
    /// metadata is undefined behavior unless it is re-initialized by [`Self::write_meta`].
    ///
    /// # Safety
    ///
    /// The caller should ensure that:
    /// - the reference count is `0` (so we are the sole owner of the frame);
    /// - the metadata is initialized;
    pub(super) unsafe fn drop_meta_in_place(&self) {
        let paddr = self.frame_paddr();

        // SAFETY: We have exclusive access to the frame metadata.
        let vtable_ptr = unsafe { &mut *self.vtable_ptr.get() };
        // SAFETY: The frame metadata is initialized and valid. `assume_init_read`
        // moves the vtable pointer out, leaving the field uninitialized.
        let vtable_ptr = unsafe { vtable_ptr.assume_init_read() };

        let meta_ptr: *mut dyn AnyFrameMeta =
            core::ptr::from_raw_parts_mut(self.storage.get(), vtable_ptr);

        // SAFETY: The implementer of the frame metadata decides that if the frame
        // is safe to be read or not.
        let mut reader =
            unsafe { VmReader::from_kernel_space(paddr_to_vaddr(paddr) as *const u8, PAGE_SIZE) };

        // SAFETY: `ptr` points to the metadata storage which is valid to be mutably borrowed under
        // `vtable_ptr` because the metadata is valid, the vtable is correct, and we have the exclusive
        // access to the frame metadata.
        unsafe {
            // Invoke the custom `on_drop` handler.
            (*meta_ptr).on_drop(&mut reader);
            // Drop the frame metadata.
            core::ptr::drop_in_place(meta_ptr);
        }
    }
}
439
/// The metadata of frames that holds metadata of frames.
///
/// The metadata pages allocated in [`init`] are themselves tracked with this
/// (empty) metadata type.
#[derive(Debug, Default)]
pub struct MetaPageMeta {}

impl_frame_meta_for!(MetaPageMeta);
445
/// Initializes the metadata of all physical frames.
///
/// The function returns a list of `Frame`s containing the metadata.
///
/// # Safety
///
/// This function should be called only once and only on the BSP,
/// before any APs are started.
pub(crate) unsafe fn init() -> Segment<MetaPageMeta> {
    // The exclusive upper bound of physical memory: the highest end address
    // among all physical memory regions.
    let max_paddr = {
        let regions = &crate::boot::EARLY_INFO.get().unwrap().memory_regions;
        regions
            .iter()
            .filter(|r| r.typ().is_physical())
            .map(|r| r.base() + r.len())
            .max()
            .unwrap()
    };

    info!(
        "Initializing frame metadata for physical memory up to {:x}",
        max_paddr
    );

    // In RISC-V, the boot page table has mapped the 512GB memory,
    // so we don't need to add temporary linear mapping.
    // In LoongArch, the DWM0 has mapped the whole memory,
    // so we don't need to add temporary linear mapping.
    #[cfg(target_arch = "x86_64")]
    add_temp_linear_mapping(max_paddr);

    // One metadata slot is needed per base frame.
    let tot_nr_frames = max_paddr / page_size::<PagingConsts>(1);
    let (nr_meta_pages, meta_pages) = alloc_meta_frames(tot_nr_frames);

    // Map the metadata frames.
    boot_pt::with_borrow(|boot_pt| {
        for i in 0..nr_meta_pages {
            let frame_paddr = meta_pages + i * PAGE_SIZE;
            // Metadata pages are mapped contiguously starting at the slot of
            // frame 0, i.e., the start of `FRAME_METADATA_RANGE`.
            let vaddr = mapping::frame_to_meta::<PagingConsts>(0) + i * PAGE_SIZE;
            let prop = PageProperty {
                flags: PageFlags::RW,
                cache: CachePolicy::Writeback,
                priv_flags: PrivilegedPageFlags::GLOBAL,
            };
            // SAFETY: we are doing the metadata mappings for the kernel.
            unsafe { boot_pt.map_base_page(vaddr, frame_paddr, prop) };
        }
    })
    .unwrap();

    // Now the metadata frames are mapped, we can initialize the metadata.
    // This store also makes `is_initialized` return `true` from now on.
    super::MAX_PADDR.store(max_paddr, Ordering::Relaxed);

    let meta_page_range = meta_pages..meta_pages + nr_meta_pages * PAGE_SIZE;

    // Mark every frame that the early allocator handed out — except the
    // metadata pages, which are returned to the caller below — as in use,
    // and leak the segments so the frames are never recycled.
    let (range_1, range_2) = allocator::EARLY_ALLOCATOR
        .lock()
        .as_ref()
        .unwrap()
        .allocated_regions();
    for r in range_difference(&range_1, &meta_page_range) {
        let early_seg = Segment::from_unused(r, |_| EarlyAllocatedFrameMeta).unwrap();
        let _ = ManuallyDrop::new(early_seg);
    }
    for r in range_difference(&range_2, &meta_page_range) {
        let early_seg = Segment::from_unused(r, |_| EarlyAllocatedFrameMeta).unwrap();
        let _ = ManuallyDrop::new(early_seg);
    }

    mark_unusable_ranges();

    Segment::from_unused(meta_page_range, |_| MetaPageMeta {}).unwrap()
}
519
520/// Returns whether the global frame allocator is initialized.
521pub(in crate::mm) fn is_initialized() -> bool {
522 // `init` sets it with relaxed ordering somewhere in the middle. But due
523 // to the safety requirement of the `init` function, we can assume that
524 // there is no race conditions.
525 super::MAX_PADDR.load(Ordering::Relaxed) != 0
526}
527
/// Allocates (from the early allocator) and initializes the pages holding the
/// metadata slots for `tot_nr_frames` frames.
///
/// Returns the number of metadata pages allocated and their starting physical
/// address. Every slot is initialized to the "unused" state
/// (`ref_count == REF_COUNT_UNUSED`).
fn alloc_meta_frames(tot_nr_frames: usize) -> (usize, Paddr) {
    // Pages needed to hold `tot_nr_frames` slots, rounded up; the
    // `checked_mul` guards against overflow on absurdly large frame counts.
    let nr_meta_pages = tot_nr_frames
        .checked_mul(size_of::<MetaSlot>())
        .unwrap()
        .div_ceil(PAGE_SIZE);
    let paddr = allocator::early_alloc(
        Layout::from_size_align(nr_meta_pages * PAGE_SIZE, PAGE_SIZE).unwrap(),
    )
    .unwrap();

    let slots = paddr_to_vaddr(paddr) as *mut MetaSlot;

    // Initialize the metadata slots.
    for i in 0..tot_nr_frames {
        // SAFETY: The memory is successfully allocated with `tot_nr_frames`
        // slots so the index must be within the range.
        let slot = unsafe { slots.add(i) };
        // SAFETY: The memory is just allocated so we have exclusive access and
        // it's valid for writing.
        unsafe {
            slot.write(MetaSlot {
                storage: UnsafeCell::new([0; FRAME_METADATA_MAX_SIZE]),
                ref_count: AtomicU64::new(REF_COUNT_UNUSED),
                vtable_ptr: UnsafeCell::new(MaybeUninit::uninit()),
                in_list: AtomicU64::new(0),
            })
        };
    }

    (nr_meta_pages, paddr)
}
559
/// Unusable memory metadata. Cannot be used for any purposes.
///
/// See [`mark_unusable_ranges`] for which boot memory region types are
/// tracked with this metadata.
#[derive(Debug)]
pub struct UnusableMemoryMeta;
impl_frame_meta_for!(UnusableMemoryMeta);

/// Reserved memory metadata. Maybe later used as I/O memory.
#[derive(Debug)]
pub struct ReservedMemoryMeta;
impl_frame_meta_for!(ReservedMemoryMeta);

/// The metadata of physical pages that contains the kernel itself.
#[derive(Debug, Default)]
pub struct KernelMeta;
impl_frame_meta_for!(KernelMeta);
574
/// Marks a page-aligned memory region as in use by constructing a [`Segment`]
/// with the given metadata and immediately leaking it (via [`ManuallyDrop`]),
/// so the frames are never handed back to the allocator.
macro_rules! mark_ranges {
    ($region: expr, $typ: expr) => {{
        debug_assert!($region.base().is_multiple_of(PAGE_SIZE));
        debug_assert!($region.len().is_multiple_of(PAGE_SIZE));

        let seg = Segment::from_unused($region.base()..$region.end(), |_| $typ).unwrap();
        let _ = ManuallyDrop::new(seg);
    }};
}
584
585fn mark_unusable_ranges() {
586 let regions = &crate::boot::EARLY_INFO.get().unwrap().memory_regions;
587
588 for region in regions.iter().rev().skip_while(|r| !r.typ().is_physical()) {
589 match region.typ() {
590 MemoryRegionType::BadMemory => mark_ranges!(region, UnusableMemoryMeta),
591 MemoryRegionType::Unknown => mark_ranges!(region, ReservedMemoryMeta),
592 MemoryRegionType::NonVolatileSleep => mark_ranges!(region, UnusableMemoryMeta),
593 MemoryRegionType::Reserved => mark_ranges!(region, ReservedMemoryMeta),
594 MemoryRegionType::Kernel => mark_ranges!(region, KernelMeta),
595 MemoryRegionType::Module => mark_ranges!(region, UnusableMemoryMeta),
596 MemoryRegionType::Framebuffer => mark_ranges!(region, ReservedMemoryMeta),
597 MemoryRegionType::Reclaimable => mark_ranges!(region, UnusableMemoryMeta),
598 MemoryRegionType::Usable => {} // By default it is initialized as usable.
599 }
600 }
601}
602
/// Adds a temporary linear mapping for the metadata frames.
///
/// We only assume boot page table to contain 4G linear mapping. Thus if the
/// physical memory is huge we end up depleted of linear virtual memory for
/// initializing metadata.
#[cfg(target_arch = "x86_64")]
fn add_temp_linear_mapping(max_paddr: Paddr) {
    use align_ext::AlignExt;

    use crate::mm::kspace::LINEAR_MAPPING_BASE_VADDR;

    const PADDR4G: Paddr = 0x1_0000_0000;

    // Physical memory below 4G is already covered by the boot page table's
    // linear mapping; nothing to do.
    if max_paddr <= PADDR4G {
        return;
    }

    // TODO: We don't know if the allocator would allocate from low to high or
    // not. So we prepare all linear mappings in the boot page table. Hope it
    // won't drag the boot performance much.
    let end_paddr = max_paddr.align_up(PAGE_SIZE);
    // Map everything from 4G up to the (page-aligned) end of physical memory.
    let prange = PADDR4G..end_paddr;
    let prop = PageProperty {
        flags: PageFlags::RW,
        cache: CachePolicy::Writeback,
        priv_flags: PrivilegedPageFlags::GLOBAL,
    };

    // SAFETY: we are doing the linear mapping for the kernel.
    unsafe {
        boot_pt::with_borrow(|boot_pt| {
            for paddr in prange.step_by(PAGE_SIZE) {
                let vaddr = LINEAR_MAPPING_BASE_VADDR + paddr;
                boot_pt.map_base_page(vaddr, paddr, prop);
            }
        })
        .unwrap();
    }
}