ostd/boot/memory_region.rs
// SPDX-License-Identifier: MPL-2.0

//! Information of memory regions in the boot phase.

use vstd::prelude::*;
use vstd_extra::prelude::*;

use crate::mm::{Paddr, Vaddr};
use crate::specs::arch::mm::{CONST_MAX_PADDR, MAX_PADDR, PAGE_SIZE};
use crate::specs::mm::frame::memory_region_specs::{MemRegionModel, MemoryRegionArrayModel};

use core::ops::Deref;

// use align_ext::AlignExt;

// use crate::mm::{kspace::kernel_loaded_offset, Paddr, Vaddr, PAGE_SIZE};
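// `MemRegionModel` and `MemoryRegionArrayModel` are the abstract, spec-level
// models that the executable types below are mapped to via their `View` impls.
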
/// The type of initial memory regions that are needed for the kernel.
#[verus_verify]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub enum MemoryRegionType {
    /// Maybe points to an unplugged DIMM module. It's bad anyway.
    BadMemory = 0,
    /// Some holes not specified by the bootloader/firmware. It may be used for
    /// I/O memory, but we don't know for sure.
    Unknown = 1,
    /// In the ACPI spec, this area needs to be preserved when sleeping.
    NonVolatileSleep = 2,
    /// Reserved by the BIOS or the bootloader; do not use.
    Reserved = 3,
    /// The place where kernel sections are loaded.
    Kernel = 4,
    /// The place where kernel modules (e.g., initrd) are loaded; could be reused.
    Module = 5,
    /// The memory region provided as the framebuffer.
    Framebuffer = 6,
    /// Once used in the boot phase. The kernel can reclaim it after initialization.
    Reclaimable = 7,
    /// Directly usable by the frame allocator.
    Usable = 8,
}
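
// NOTE: The derived `Ord` follows the discriminant order above, so a smaller
// value means a less usable (more restrictive) region type. The disabled
// `into_non_overlapping` below depends on this: taking the minimum type among
// all regions covering an address picks the most restrictive one.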

/// The information of initial memory regions that are needed by the kernel.
///
/// The regions are **not** guaranteed to be non-overlapping. Each region must
/// be page-aligned.
#[verus_verify]
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub struct MemoryRegion {
    base: usize,
    len: usize,
    typ: MemoryRegionType,
}

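// Specification-level definitions: `inv` is the invariant that the region
// lies within physical memory, and `view` abstracts a region into a
// `MemRegionModel` with half-open bounds `[base, end)`.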
verus! {

impl MemoryRegionType {
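    /// Maps each variant to its numeric discriminant for use in the abstract model.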
    pub open spec fn to_int(self) -> int {
        match self {
            MemoryRegionType::BadMemory => 0,
            MemoryRegionType::Unknown => 1,
            MemoryRegionType::NonVolatileSleep => 2,
            MemoryRegionType::Reserved => 3,
            MemoryRegionType::Kernel => 4,
            MemoryRegionType::Module => 5,
            MemoryRegionType::Framebuffer => 6,
            MemoryRegionType::Reclaimable => 7,
            MemoryRegionType::Usable => 8,
        }
    }
}

impl Inv for MemoryRegion {
    closed spec fn inv(self) -> bool {
        self.base + self.len <= CONST_MAX_PADDR
    }
}

impl View for MemoryRegion {
    type V = MemRegionModel;

    closed spec fn view(&self) -> Self::V {
        MemRegionModel { base: self.base as int, end: self.base + self.len, typ: self.typ.to_int() }
    }
}

impl InvView for MemoryRegion {
    proof fn view_preserves_inv(self) {
    }
}

} // verus!

#[verus_verify]
impl MemoryRegion {
    /// Constructs a valid memory region.
    #[verus_spec(ret =>
        requires
            base + len <= MAX_PADDR(),
        ensures
            ret.inv(),
            ret@ == (MemRegionModel {
                base: base as int,
                end: base + len,
                typ: typ.to_int(),
            }),
    )]
    pub const fn new(base: Paddr, len: usize, typ: MemoryRegionType) -> Self {
        MemoryRegion { base, len, typ }
    }

    /// Constructs a bad memory region.
    #[verus_spec(ret =>
        ensures
            ret.inv(),
            ret@ == MemRegionModel::bad(),
    )]
    pub const fn bad() -> Self {
        MemoryRegion {
            base: 0,
            len: 0,
            typ: MemoryRegionType::BadMemory,
        }
    }
    /*
    /// Constructs a memory region where kernel sections are loaded.
    ///
    /// Most boot protocols do not mark the place where the kernel loads as unusable. In this case,
    /// we need to explicitly construct and append this memory region.
    pub fn kernel() -> Self {
        // These are physical addresses provided by the linker script.
        extern "C" {
            fn __kernel_start();
            fn __kernel_end();
        }
        MemoryRegion {
            base: __kernel_start as usize - kernel_loaded_offset(),
            len: __kernel_end as usize - __kernel_start as usize,
            typ: MemoryRegionType::Kernel,
        }
    }

    /// Constructs a framebuffer memory region.
    pub fn framebuffer(fb: &crate::boot::BootloaderFramebufferArg) -> Self {
        Self {
            base: fb.address,
            len: (fb.width * fb.height * fb.bpp).div_ceil(8), // round up when dividing by 8 (bits/byte)
            typ: MemoryRegionType::Framebuffer,
        }
    }

    /// Constructs a module memory region from a byte slice that lives in the linear mapping.
    ///
    /// # Panics
    ///
    /// This method will panic if the byte slice does not live in the linear mapping.
    pub fn module(bytes: &[u8]) -> Self {
        let vaddr = bytes.as_ptr() as Vaddr;
        assert!(crate::mm::kspace::LINEAR_MAPPING_VADDR_RANGE.contains(&vaddr));

        Self {
            base: vaddr - crate::mm::kspace::LINEAR_MAPPING_BASE_VADDR,
            len: bytes.len(),
            typ: MemoryRegionType::Reclaimable,
        }
    } */

    /// The physical address of the base of the region.
    #[verus_verify(dual_spec)]
    pub fn base(&self) -> Paddr {
        self.base
    }

    /// The length in bytes of the region.
    #[verus_verify(dual_spec)]
    pub fn len(&self) -> usize {
        self.len
    }

    /// The physical address of the end of the region.
    #[verus_verify(dual_spec)]
    #[verus_spec(requires self.inv())]
    pub fn end(&self) -> Paddr {
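        // `inv()` bounds `self.base + self.len` by `CONST_MAX_PADDR`, so the
        // addition below cannot overflow.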
        self.base + self.len
    }

    /// Checks whether the region is empty.
    #[verus_verify(dual_spec)]
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// The type of the region.
    #[verus_verify(dual_spec)]
    pub fn typ(&self) -> MemoryRegionType {
        self.typ
    }

    /*
    fn as_aligned(&self) -> Self {
        let (base, end) = match self.typ() {
            MemoryRegionType::Usable => (
                self.base().align_up(PAGE_SIZE),
                self.end().align_down(PAGE_SIZE),
            ),
            _ => (
                self.base().align_down(PAGE_SIZE),
                self.end().align_up(PAGE_SIZE),
            ),
        };
        MemoryRegion {
            base,
            len: end - base,
            typ: self.typ,
        }
    }*/
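
    // Note the asymmetry in `as_aligned` above: `Usable` regions are shrunk
    // inward (partial pages are given up), while every other region type is
    // grown outward so that all pages it touches remain excluded from use.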
}

/// The maximum number of regions that can be handled.
///
/// The choice of 512 is probably fine, since the old Linux boot protocol
/// only allows 128 regions.
//
// TODO: confirm the number or make it configurable.
pub const MAX_REGIONS: usize = 512;

/// A heapless set of memory regions.
///
/// The set cannot contain more than `LEN` regions.
#[verus_verify]
pub struct MemoryRegionArray<const LEN: usize = MAX_REGIONS> {
    regions: [MemoryRegion; LEN],
    count: usize,
}
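
// Only the first `count` entries of `regions` are meaningful; the remaining
// slots are padding, initialized to `MemoryRegion::bad()` (see `new`).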

verus! {

impl<const LEN: usize> Inv for MemoryRegionArray<LEN> {
    closed spec fn inv(self) -> bool {
        self.count <= LEN
    }
}

impl<const LEN: usize> View for MemoryRegionArray<LEN> {
    type V = MemoryRegionArrayModel<LEN>;

    closed spec fn view(&self) -> MemoryRegionArrayModel<LEN> {
        MemoryRegionArrayModel { regions: Seq::new(self.count as nat, |i: int| self.regions[i]@) }
    }
}

impl<const LEN: usize> InvView for MemoryRegionArray<LEN> {
    proof fn view_preserves_inv(self) {
    }
}

} // verus!

#[verus_verify]
impl<const LEN: usize> Default for MemoryRegionArray<LEN> {
    #[verus_spec(ret =>
        ensures
            ret.inv(),
            ret@ == MemoryRegionArrayModel::<LEN>::new()
    )]
    fn default() -> Self {
        Self::new()
    }
}

/*
impl<const LEN: usize> Deref for MemoryRegionArray<LEN> {
    type Target = [MemoryRegion];

    fn deref(&self) -> &Self::Target {
        &self.regions[..self.count]
    }
}*/

#[verus_verify]
impl<const LEN: usize> MemoryRegionArray<LEN> {
    /// Constructs an empty set.
    #[verus_spec(ret =>
        ensures
            ret.inv(),
            ret@ == MemoryRegionArrayModel::<LEN>::new(),
    )]
    pub const fn new() -> Self {
        let ret = Self {
            regions: [MemoryRegion::bad(); LEN],
            count: 0,
        };

        proof! {
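            // `count` is 0, so the view's sequence has length 0 and is
            // extensionally equal to the empty sequence.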
            assert(ret@.regions == Seq::<MemRegionModel>::empty());
        };

        ret
    }

    /// Appends a region to the set.
    ///
    /// If the set is full, an error is returned.
    #[verus_spec(ret =>
        requires
            old(self).inv(),
            region.inv(),
            !old(self)@.full(),
        ensures
            self.inv(),
            self@ == old(self)@.push(region@),
            ret.is_ok(),
    )]
    pub fn push(&mut self, region: MemoryRegion) -> Result<(), &'static str> {
        if self.count < self.regions.len() {
            self.regions[self.count] = region;
            self.count = self.count + 1;
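            // The new abstract sequence equals the old one with `region@`
            // appended: prove pointwise equality, then conclude by sequence
            // extensionality.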
            proof! {
                assert(self@.regions == old(self)@.regions.push(region@)) by {
                    assert(forall |i: int| 0 <= i && i < self@.regions.len()
                        ==> #[trigger] self@.regions[i] == old(self)@.regions.push(region@)[i]);
                };
            };
            Ok(())
        } else {
            Err("MemoryRegionArray is full")
        }
    }
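
    // A minimal usage sketch (illustrative only; the boot code that populates
    // the set from the bootloader's memory map lives elsewhere):
    //
    //     let mut regions = MemoryRegionArray::<64>::new();
    //     regions
    //         .push(MemoryRegion::new(0, PAGE_SIZE, MemoryRegionType::Usable))
    //         .unwrap();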

    /*
    /// Sorts the regions and returns a full set of non-overlapping regions.
    ///
    /// If an address is in multiple regions, the region with the lowest
    /// usability will be its type.
    ///
    /// All the addresses between 0 and the end of the last region will be in
    /// the resulting set. If an address is not in any region, it will be marked
    /// as [`MemoryRegionType::Unknown`].
    ///
    /// If any of the region boundaries are not page-aligned, they will be aligned
    /// according to the type of the region.
    ///
    /// # Panics
    ///
    /// This method will panic if the number of output regions is greater than `LEN`.
    pub fn into_non_overlapping(mut self) -> Self {
        let max_addr = self
            .iter()
            .map(|r| r.end())
            .max()
            .unwrap_or(0)
            .align_down(PAGE_SIZE);
        self.regions.iter_mut().for_each(|r| *r = r.as_aligned());

        let mut result = MemoryRegionArray::<LEN>::new();

        let mut cur_right = 0;

        while cur_right < max_addr {
            // Find the most restrictive type.
            let typ = self
                .iter()
                .filter(|region| (region.base()..region.end()).contains(&cur_right))
                .map(|region| region.typ())
                .min()
                .unwrap_or(MemoryRegionType::Unknown);

            // Find the right boundary.
            let right = self
                .iter()
                .filter_map(|region| {
                    if region.base() > cur_right {
                        Some(region.base())
                    } else if region.end() > cur_right {
                        Some(region.end())
                    } else {
                        None
                    }
                })
                .min()
                .unwrap();

            result
                .push(MemoryRegion::new(cur_right, right - cur_right, typ))
                .unwrap();

            cur_right = right;
        }

        // Merge the adjacent regions with the same type.
        let mut merged_count = 1;
        for i in 1..result.count {
            if result[i].typ() == result.regions[merged_count - 1].typ() {
                result.regions[merged_count - 1] = MemoryRegion::new(
                    result.regions[merged_count - 1].base(),
                    result.regions[merged_count - 1].len() + result[i].len(),
                    result.regions[merged_count - 1].typ(),
                );
            } else {
                result.regions[merged_count] = result[i];
                merged_count += 1;
            }
        }
        result.count = merged_count;

        result
    }*/
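
    // A worked sketch of the sweep in `into_non_overlapping` (disabled above):
    // given overlapping inputs [0, 2P) `Usable` and [P, 3P) `BadMemory`, the
    // sweep emits [0, P) `Usable`, [P, 2P) `BadMemory`, and [2P, 3P)
    // `BadMemory` (on overlap the minimum, i.e. most restrictive, type wins);
    // the final merge pass then coalesces the last two into [P, 3P) `BadMemory`.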
}

#[cfg(ktest)]
mod test {
    // NOTE: Disabled pending the restoration of `into_non_overlapping` and
    // the `Deref` impl, both of which are commented out above; the test
    // cannot compile without them.
    /*
    use super::*;
    use crate::prelude::ktest;

    #[ktest]
    fn test_sort_full_non_overlapping() {
        let mut regions = MemoryRegionArray::<64>::new();
        // Regions that can be combined.
        regions
            .push(MemoryRegion::new(
                0,
                PAGE_SIZE + 1,
                MemoryRegionType::Usable,
            ))
            .unwrap();
        regions
            .push(MemoryRegion::new(
                PAGE_SIZE - 1,
                PAGE_SIZE + 2,
                MemoryRegionType::Usable,
            ))
            .unwrap();
        regions
            .push(MemoryRegion::new(
                PAGE_SIZE * 2,
                PAGE_SIZE * 5,
                MemoryRegionType::Usable,
            ))
            .unwrap();
        // A punctured region.
        regions
            .push(MemoryRegion::new(
                PAGE_SIZE * 3 + 1,
                PAGE_SIZE - 2,
                MemoryRegionType::BadMemory,
            ))
            .unwrap();
        // A far region that leaves a hole in the middle.
        regions
            .push(MemoryRegion::new(
                PAGE_SIZE * 9,
                PAGE_SIZE * 2,
                MemoryRegionType::Usable,
            ))
            .unwrap();

        let regions = regions.into_non_overlapping();

        assert_eq!(regions.count, 5);
        assert_eq!(regions[0].base(), 0);
        assert_eq!(regions[0].len(), PAGE_SIZE * 3);
        assert_eq!(regions[0].typ(), MemoryRegionType::Usable);

        assert_eq!(regions[1].base(), PAGE_SIZE * 3);
        assert_eq!(regions[1].len(), PAGE_SIZE);
        assert_eq!(regions[1].typ(), MemoryRegionType::BadMemory);

        assert_eq!(regions[2].base(), PAGE_SIZE * 4);
        assert_eq!(regions[2].len(), PAGE_SIZE * 3);
        assert_eq!(regions[2].typ(), MemoryRegionType::Usable);

        assert_eq!(regions[3].base(), PAGE_SIZE * 7);
        assert_eq!(regions[3].len(), PAGE_SIZE * 2);
        assert_eq!(regions[3].typ(), MemoryRegionType::Unknown);

        assert_eq!(regions[4].base(), PAGE_SIZE * 9);
        assert_eq!(regions[4].len(), PAGE_SIZE * 2);
        assert_eq!(regions[4].typ(), MemoryRegionType::Usable);
    }
    */
}