ostd/boot/memory_region.rs

// SPDX-License-Identifier: MPL-2.0

//! Information about memory regions in the boot phase.

use core::ops::Deref;

use align_ext::AlignExt;

use crate::mm::{PAGE_SIZE, Paddr, Vaddr, kspace::kernel_loaded_offset};

/// The type of initial memory regions that are needed for the kernel.
///
/// The variants are ordered from least usable (`BadMemory`) to most usable
/// (`Usable`); `MemoryRegionArray::into_non_overlapping` relies on this
/// ordering when resolving overlaps.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub enum MemoryRegionType {
    /// May point to an unplugged DIMM module. It's bad anyway.
    BadMemory = 0,
    /// A hole not specified by the bootloader/firmware. It may be used for
    /// I/O memory, but we don't know for sure.
    Unknown = 1,
    /// Per the ACPI spec, this area must be preserved when sleeping.
    NonVolatileSleep = 2,
    /// Reserved by the BIOS or the bootloader; do not use.
    Reserved = 3,
    /// The place where kernel sections are loaded.
    Kernel = 4,
    /// The place where kernel modules (e.g., initrd) are loaded; it could be
    /// reused afterwards.
    Module = 5,
    /// The memory region provided as the framebuffer.
    Framebuffer = 6,
    /// Used in the boot phase only. The kernel can reclaim it after initialization.
    Reclaimable = 7,
    /// Directly usable by the frame allocator.
    Usable = 8,
}

impl MemoryRegionType {
    /// Returns whether the memory region corresponds to physical memory.
    ///
    /// The linear mapping will cover memory addresses up to the top of the physical memory.
    /// Therefore, if this method returns `false`, the memory region may not be included in the
    /// linear mapping.
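    ///
    /// For example (an illustrative sketch):
    ///
    /// ```ignore
    /// // Usable RAM is physical memory and is covered by the linear mapping.
    /// assert!(MemoryRegionType::Usable.is_physical());
    /// // The framebuffer is I/O memory and may lie outside of it.
    /// assert!(!MemoryRegionType::Framebuffer.is_physical());
    /// ```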
    pub fn is_physical(self) -> bool {
        // Bad memory or I/O memory is not physical. All other memory should be physical.
        !matches!(
            self,
            Self::BadMemory | Self::Unknown | Self::Reserved | Self::Framebuffer
        )
    }
}

/// The information of initial memory regions that are needed by the kernel.
///
/// The regions are **not** guaranteed to be non-overlapping. Region boundaries
/// should be page-aligned; unaligned boundaries are fixed up by
/// `MemoryRegionArray::into_non_overlapping`.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub struct MemoryRegion {
    base: usize,
    len: usize,
    typ: MemoryRegionType,
}

impl MemoryRegion {
    /// Constructs a valid memory region.
    pub const fn new(base: Paddr, len: usize, typ: MemoryRegionType) -> Self {
        MemoryRegion { base, len, typ }
    }

    /// Constructs a bad memory region.
    pub const fn bad() -> Self {
        MemoryRegion {
            base: 0,
            len: 0,
            typ: MemoryRegionType::BadMemory,
        }
    }

    /// Constructs a memory region where kernel sections are loaded.
    ///
    /// Most boot protocols do not mark the place where the kernel loads as unusable. In this case,
    /// we need to explicitly construct and append this memory region.
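    ///
    /// A sketch of the intended use (illustrative; the surrounding boot code is
    /// an assumption):
    ///
    /// ```ignore
    /// let mut regions = MemoryRegionArray::<64>::new();
    /// // ... push the regions reported by the boot protocol first ...
    /// regions.push(MemoryRegion::kernel()).unwrap();
    /// let regions = regions.into_non_overlapping();
    /// ```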
    pub fn kernel() -> Self {
        // These symbols are provided by the linker script. Their addresses are
        // virtual, so the kernel loaded offset is subtracted to get the
        // physical base.
        unsafe extern "C" {
            fn __kernel_start();
            fn __kernel_end();
        }
        MemoryRegion {
            base: __kernel_start as *const () as usize - kernel_loaded_offset(),
            len: __kernel_end as *const () as usize - __kernel_start as *const () as usize,
            typ: MemoryRegionType::Kernel,
        }
    }

    /// Constructs a framebuffer memory region.
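    ///
    /// The length is the framebuffer's bit count rounded up to whole bytes. For
    /// example, a hypothetical 1024x768 framebuffer at 32 bits per pixel spans
    /// `(1024 * 768 * 32).div_ceil(8)` = 3,145,728 bytes (3 MiB).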
    pub fn framebuffer(fb: &crate::boot::BootloaderFramebufferArg) -> Self {
        Self {
            base: fb.address,
            // Round up when converting bits to bytes (8 bits per byte).
            len: (fb.width * fb.height * fb.bpp).div_ceil(8),
            typ: MemoryRegionType::Framebuffer,
        }
    }

    /// Constructs a module memory region from a byte slice that lives in the linear mapping.
    ///
    /// # Panics
    ///
    /// This method will panic if the byte slice does not live in the linear mapping.
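    ///
    /// A sketch of the intended use (illustrative; `initrd` stands for a byte
    /// slice handed over by the boot protocol):
    ///
    /// ```ignore
    /// let region = MemoryRegion::module(initrd);
    /// // Module memory can be reclaimed once the kernel is done with it.
    /// assert_eq!(region.typ(), MemoryRegionType::Reclaimable);
    /// ```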
    pub fn module(bytes: &[u8]) -> Self {
        let vaddr = bytes.as_ptr() as Vaddr;
        assert!(crate::mm::kspace::LINEAR_MAPPING_VADDR_RANGE.contains(&vaddr));

        Self {
            // Undo the linear-mapping offset to recover the physical address.
            base: vaddr - crate::mm::kspace::LINEAR_MAPPING_BASE_VADDR,
            len: bytes.len(),
            typ: MemoryRegionType::Reclaimable,
        }
    }

    /// Returns the physical address of the base of the region.
    pub fn base(&self) -> Paddr {
        self.base
    }

    /// Returns the length in bytes of the region.
    pub fn len(&self) -> usize {
        self.len
    }

    /// Returns the physical address of the end of the region.
    pub fn end(&self) -> Paddr {
        self.base + self.len
    }

    /// Checks whether the region is empty.
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Returns the type of the region.
    pub fn typ(&self) -> MemoryRegionType {
        self.typ
    }

    /// Returns a copy of the region with page-aligned boundaries.
    ///
    /// Usable regions shrink inward (base rounded up, end rounded down) so that
    /// no partially usable page is handed out, while regions of all other types
    /// grow outward (base rounded down, end rounded up) so that their contents
    /// stay fully covered.
    fn as_aligned(&self) -> Self {
        let (base, end) = match self.typ() {
            MemoryRegionType::Usable => (
                self.base().align_up(PAGE_SIZE),
                self.end().align_down(PAGE_SIZE),
            ),
            _ => (
                self.base().align_down(PAGE_SIZE),
                self.end().align_up(PAGE_SIZE),
            ),
        };
        MemoryRegion {
            base,
            len: end - base,
            typ: self.typ,
        }
    }
}

/// The maximum number of regions that can be handled.
///
/// The choice of 512 is probably fine since the old Linux boot protocol only
/// allows 128 regions.
//
// TODO: confirm the number or make it configurable.
const MAX_REGIONS: usize = 512;

/// A heapless set of memory regions.
///
/// The set cannot contain more than `LEN` regions.
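///
/// The array derefs to `[MemoryRegion]`, so slice operations (indexing, `iter`,
/// `len`) apply only to the regions pushed so far.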
pub(crate) struct MemoryRegionArray<const LEN: usize = MAX_REGIONS> {
    regions: [MemoryRegion; LEN],
    count: usize,
}

impl<const LEN: usize> Default for MemoryRegionArray<LEN> {
    fn default() -> Self {
        Self::new()
    }
}

impl<const LEN: usize> Deref for MemoryRegionArray<LEN> {
    type Target = [MemoryRegion];

    fn deref(&self) -> &Self::Target {
        &self.regions[..self.count]
    }
}

/// An error returned by [`MemoryRegionArray::push`] when the array is full.
#[derive(Debug)]
pub(crate) struct ArrayFullError;

impl<const LEN: usize> MemoryRegionArray<LEN> {
    /// Constructs an empty set.
    pub(crate) const fn new() -> Self {
        Self {
            regions: [MemoryRegion::bad(); LEN],
            count: 0,
        }
    }

    /// Appends a region to the set.
    ///
    /// If the set is full, an error is returned.
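    ///
    /// A sketch of the failure mode (illustrative):
    ///
    /// ```ignore
    /// let mut regions = MemoryRegionArray::<1>::new();
    /// regions.push(MemoryRegion::bad()).unwrap();
    /// // The array is full now, so the second push fails.
    /// assert!(regions.push(MemoryRegion::bad()).is_err());
    /// ```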
    pub(crate) fn push(&mut self, region: MemoryRegion) -> Result<(), ArrayFullError> {
        if self.count >= self.regions.len() {
            return Err(ArrayFullError);
        }

        self.regions[self.count] = region;
        self.count += 1;

        Ok(())
    }

    /// Resolves the set into a sorted, full set of non-overlapping regions.
    ///
    /// If an address is covered by multiple regions, it takes the type of the
    /// least usable region (i.e., the minimum [`MemoryRegionType`]).
    ///
    /// All addresses from 0 up to the (page-aligned) end of the highest region
    /// will be covered by the resulting set. If an address is not covered by any
    /// region, it will be marked as [`MemoryRegionType::Unknown`].
    ///
    /// If any of the region boundaries are not page-aligned, they will be aligned
    /// according to the type of the region.
    ///
    /// # Panics
    ///
    /// This method will panic if the number of output regions is greater than `LEN`.
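    ///
    /// A sketch of the overlap resolution (illustrative; see the `ktest` at the
    /// bottom of this file for a fuller example):
    ///
    /// ```ignore
    /// let mut regions = MemoryRegionArray::<8>::new();
    /// regions
    ///     .push(MemoryRegion::new(0, PAGE_SIZE * 2, MemoryRegionType::Usable))
    ///     .unwrap();
    /// regions
    ///     .push(MemoryRegion::new(PAGE_SIZE, PAGE_SIZE, MemoryRegionType::Reserved))
    ///     .unwrap();
    /// let regions = regions.into_non_overlapping();
    /// // The overlapping page takes the less usable type, `Reserved`.
    /// assert_eq!(regions[1].typ(), MemoryRegionType::Reserved);
    /// ```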
    pub(crate) fn into_non_overlapping(mut self) -> Self {
        // The resulting set covers all addresses in `0..max_addr`.
        let max_addr = self
            .iter()
            .map(|r| r.end())
            .max()
            .unwrap_or(0)
            .align_down(PAGE_SIZE);
        // First, align every region's boundaries according to its type.
        self.regions.iter_mut().for_each(|r| *r = r.as_aligned());

        let mut result = MemoryRegionArray::<LEN>::new();

        // Sweep upwards from address 0, emitting one region per interval
        // between adjacent boundaries.
        let mut cur_right = 0;

        while cur_right < max_addr {
            // Find the most restrictive type at `cur_right`.
            let typ = self
                .iter()
                .filter(|region| (region.base()..region.end()).contains(&cur_right))
                .map(|region| region.typ())
                .min()
                .unwrap_or(MemoryRegionType::Unknown);

            // Find the right boundary, i.e., the nearest boundary above `cur_right`.
            let right = self
                .iter()
                .filter_map(|region| {
                    if region.base() > cur_right {
                        Some(region.base())
                    } else if region.end() > cur_right {
                        Some(region.end())
                    } else {
                        None
                    }
                })
                .min()
                .unwrap();

            result
                .push(MemoryRegion::new(cur_right, right - cur_right, typ))
                .unwrap();

            cur_right = right;
        }

        // Merge adjacent regions of the same type. `merged_count` is the
        // number of regions kept so far; an empty result stays empty.
        let mut merged_count = result.count.min(1);
        for i in 1..result.count {
            if result[i].typ() == result.regions[merged_count - 1].typ() {
                result.regions[merged_count - 1] = MemoryRegion::new(
                    result.regions[merged_count - 1].base(),
                    result.regions[merged_count - 1].len() + result[i].len(),
                    result.regions[merged_count - 1].typ(),
                );
            } else {
                result.regions[merged_count] = result[i];
                merged_count += 1;
            }
        }
        result.count = merged_count;

        result
    }
}

#[cfg(ktest)]
mod test {
    use super::*;
    use crate::prelude::ktest;

    #[ktest]
    fn sort_full_non_overlapping() {
        let mut regions = MemoryRegionArray::<64>::new();
        // Overlapping regions that can be combined into one usable span.
        regions
            .push(MemoryRegion::new(
                0,
                PAGE_SIZE + 1,
                MemoryRegionType::Usable,
            ))
            .unwrap();
        regions
            .push(MemoryRegion::new(
                PAGE_SIZE - 1,
                PAGE_SIZE + 2,
                MemoryRegionType::Usable,
            ))
            .unwrap();
        regions
            .push(MemoryRegion::new(
                PAGE_SIZE * 2,
                PAGE_SIZE * 5,
                MemoryRegionType::Usable,
            ))
            .unwrap();
        // A bad region that punches a hole in the usable span.
        regions
            .push(MemoryRegion::new(
                PAGE_SIZE * 3 + 1,
                PAGE_SIZE - 2,
                MemoryRegionType::BadMemory,
            ))
            .unwrap();
        // A distant region that leaves a hole in the middle.
        regions
            .push(MemoryRegion::new(
                PAGE_SIZE * 9,
                PAGE_SIZE * 2,
                MemoryRegionType::Usable,
            ))
            .unwrap();

        let regions = regions.into_non_overlapping();

        assert_eq!(regions.count, 5);
        assert_eq!(regions[0].base(), 0);
        assert_eq!(regions[0].len(), PAGE_SIZE * 3);
        assert_eq!(regions[0].typ(), MemoryRegionType::Usable);

        assert_eq!(regions[1].base(), PAGE_SIZE * 3);
        assert_eq!(regions[1].len(), PAGE_SIZE);
        assert_eq!(regions[1].typ(), MemoryRegionType::BadMemory);

        assert_eq!(regions[2].base(), PAGE_SIZE * 4);
        assert_eq!(regions[2].len(), PAGE_SIZE * 3);
        assert_eq!(regions[2].typ(), MemoryRegionType::Usable);

        assert_eq!(regions[3].base(), PAGE_SIZE * 7);
        assert_eq!(regions[3].len(), PAGE_SIZE * 2);
        assert_eq!(regions[3].typ(), MemoryRegionType::Unknown);

        assert_eq!(regions[4].base(), PAGE_SIZE * 9);
        assert_eq!(regions[4].len(), PAGE_SIZE * 2);
        assert_eq!(regions[4].typ(), MemoryRegionType::Usable);
    }
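
    #[ktest]
    fn push_rejects_when_full() {
        // A supplementary sketch of the capacity limit: `LEN` bounds the number
        // of regions a `MemoryRegionArray` can hold.
        let mut regions = MemoryRegionArray::<2>::new();
        regions
            .push(MemoryRegion::new(0, PAGE_SIZE, MemoryRegionType::Usable))
            .unwrap();
        regions
            .push(MemoryRegion::new(
                PAGE_SIZE,
                PAGE_SIZE,
                MemoryRegionType::Usable,
            ))
            .unwrap();
        // The array is full; a third push must be rejected.
        assert!(regions.push(MemoryRegion::bad()).is_err());
        assert_eq!(regions.count, 2);
    }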
}