ostd/mm/kspace/mod.rs

// SPDX-License-Identifier: MPL-2.0
//! Kernel memory space management.
//!
//! The kernel memory space is currently managed as follows, assuming an
//! address width of 48 bits (with a 47-bit kernel space).
//!
//! TODO: the cap of the linear mapping (the start of vm alloc) is raised
//! as a workaround for high I/O in TDX. We need an actual vm alloc API for
//! a proper fix.
//!
//! ```text
//! +-+ <- the highest used address (0xffff_ffff_ffff_0000)
//! | |         For the kernel code, 1 GiB.
//! +-+ <- 0xffff_ffff_8000_0000
//! | |
//! | |         Unused hole.
//! +-+ <- 0xffff_e100_0000_0000
//! | |         For frame metadata, 1 TiB.
//! +-+ <- 0xffff_e000_0000_0000
//! | |         For [`KVirtArea`], 32 TiB.
//! +-+ <- the middle of the higher half (0xffff_c000_0000_0000)
//! | |
//! | |
//! | |
//! | |         For linear mappings, 64 TiB.
//! | |         Mapped physical addresses are untracked.
//! | |
//! | |
//! | |
//! +-+ <- the base of high canonical address (0xffff_8000_0000_0000)
//! ```
//!
//! If the address width is (according to [`crate::arch::mm::PagingConsts`])
//! 39 bits or 57 bits, the memory space is adjusted proportionally.
//pub(crate) mod kvirt_area;
use vstd::prelude::*;

use core::ops::Range;

//use log::info;
//use spin::Once;
#[cfg(ktest)]
mod test;

use super::{
    frame::{
        meta::{mapping, AnyFrameMeta, MetaPageMeta},
        Frame, Segment,
    },
    page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
    page_table::{PageTable, PageTableConfig},
    Paddr, PagingConstsTrait, Vaddr,
};
use crate::{
    boot::memory_region::MemoryRegionType,
    mm::{largest_pages, PagingLevel},
    specs::arch::{PageTableEntry, PagingConsts},
    //task::disable_preempt,
};
use vstd_extra::extern_const::*;

verus! {

/// The shortest supported address width is 39 bits, and the literal values
/// below are written for a 48-bit address width. Adjust the values by an
/// arithmetic left shift.
pub const ADDR_WIDTH_SHIFT: isize = 48 - 48;
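// Worked example (hypothetical 39-bit configuration; not the one compiled
// here): the shift would be `39 - 48 = -9`, so a 48-bit literal such as
// `0xffff_8000_0000_0000` is adjusted by an arithmetic (sign-extending) shift
// of 9 bits to the right, yielding `0xffff_ffc0_0000_0000`, the start of the
// higher half under a 39-bit address width.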

/// Start of the kernel address space.
/// This is the _lowest_ address of the x86-64's _high_ canonical addresses.
#[cfg(not(target_arch = "loongarch64"))]
pub const KERNEL_BASE_VADDR: Vaddr = 0xffff_8000_0000_0000 << ADDR_WIDTH_SHIFT;

#[cfg(target_arch = "loongarch64")]
pub const KERNEL_BASE_VADDR: Vaddr = 0x9000_0000_0000_0000 << ADDR_WIDTH_SHIFT;

/// End of the kernel address space (non-inclusive).
pub const KERNEL_END_VADDR: Vaddr = 0xffff_ffff_ffff_0000 << ADDR_WIDTH_SHIFT;

/*
/// The kernel code is linear mapped to this address.
///
/// FIXME: This offset should be randomly chosen by the loader or the
/// boot compatibility layer. But we disabled it because OSTD
/// doesn't support relocatable kernel yet.
pub fn kernel_loaded_offset() -> usize {
    KERNEL_CODE_BASE_VADDR
}*/

#[cfg(target_arch = "x86_64")]
pub const KERNEL_CODE_BASE_VADDR: usize = 0xffff_ffff_8000_0000 << ADDR_WIDTH_SHIFT;

#[cfg(target_arch = "riscv64")]
pub const KERNEL_CODE_BASE_VADDR: usize = 0xffff_ffff_0000_0000 << ADDR_WIDTH_SHIFT;

#[cfg(target_arch = "loongarch64")]
pub const KERNEL_CODE_BASE_VADDR: usize = 0x9000_0000_0000_0000 << ADDR_WIDTH_SHIFT;

extern_const!(
    pub FRAME_METADATA_CAP_VADDR [FRAME_METADATA_CAP_VADDR_SPEC, CONST_FRAME_METADATA_CAP_VADDR]:
        Vaddr = 0xffff_fff0_8000_0000 << ADDR_WIDTH_SHIFT
);

extern_const!(
    pub FRAME_METADATA_BASE_VADDR [FRAME_METADATA_BASE_VADDR_SPEC, CONST_FRAME_METADATA_BASE_VADDR]:
        Vaddr = 0xffff_fff0_0000_0000 << ADDR_WIDTH_SHIFT
);

extern_const!(
    pub VMALLOC_BASE_VADDR [VMALLOC_BASE_VADDR_SPEC, CONST_VMALLOC_BASE_VADDR]:
        Vaddr = 0xffff_c000_0000_0000 << ADDR_WIDTH_SHIFT
);

extern_const!(
    pub VMALLOC_VADDR_RANGE [VMALLOC_VADDR_RANGE_SPEC, CONST_VMALLOC_VADDR_RANGE]:
        Range<Vaddr> = CONST_VMALLOC_BASE_VADDR..CONST_FRAME_METADATA_BASE_VADDR
);

extern_const!(
    /// The base address of the linear mapping of all physical
    /// memory in the kernel address space.
    pub LINEAR_MAPPING_BASE_VADDR [LINEAR_MAPPING_BASE_VADDR_SPEC, CONST_LINEAR_MAPPING_BASE_VADDR]:
        Vaddr = 0xffff_8000_0000_0000 << ADDR_WIDTH_SHIFT
);

extern_const!(
    pub LINEAR_MAPPING_VADDR_RANGE [LINEAR_MAPPING_VADDR_RANGE_SPEC, CONST_LINEAR_MAPPING_VADDR_RANGE]:
        Range<Vaddr> = CONST_LINEAR_MAPPING_BASE_VADDR..CONST_VMALLOC_BASE_VADDR
);
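
// Derived layout sketch (follows from the `extern_const` values above in the
// 48-bit configuration; purely illustrative):
//
//     LINEAR_MAPPING_VADDR_RANGE = 0xffff_8000_0000_0000..0xffff_c000_0000_0000
//     VMALLOC_VADDR_RANGE        = 0xffff_c000_0000_0000..0xffff_fff0_0000_0000
//     frame metadata             = 0xffff_fff0_0000_0000..0xffff_fff0_8000_0000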

/*
#[cfg(not(target_arch = "loongarch64"))]
pub const LINEAR_MAPPING_BASE_VADDR: Vaddr = 0xffff_8000_0000_0000 << ADDR_WIDTH_SHIFT;
#[cfg(target_arch = "loongarch64")]
pub const LINEAR_MAPPING_BASE_VADDR: Vaddr = 0x9000_0000_0000_0000 << ADDR_WIDTH_SHIFT;
pub const LINEAR_MAPPING_VADDR_RANGE: Range<Vaddr> = LINEAR_MAPPING_BASE_VADDR..VMALLOC_BASE_VADDR;
*/

/// Converts a physical address to a virtual address using the linear mapping
/// offset. Only available inside `ostd`.
pub open spec fn paddr_to_vaddr_spec(pa: Paddr) -> usize {
    (pa + LINEAR_MAPPING_BASE_VADDR()) as usize
}

#[verifier::when_used_as_spec(paddr_to_vaddr_spec)]
pub fn paddr_to_vaddr(pa: Paddr) -> usize
    requires
        pa + LINEAR_MAPPING_BASE_VADDR() < usize::MAX,
    returns
        paddr_to_vaddr_spec(pa),
{
    //debug_assert!(pa < VMALLOC_BASE_VADDR - LINEAR_MAPPING_BASE_VADDR);
    pa + LINEAR_MAPPING_BASE_VADDR()
}
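
// Illustrative usage sketch (hypothetical caller; the literal below assumes
// the 48-bit constants above): a physical address is translated into its
// linearly mapped kernel virtual address by adding the linear mapping base.
//
//     let pa: Paddr = 0x8000;
//     let va = paddr_to_vaddr(pa); // == pa + LINEAR_MAPPING_BASE_VADDR()
//     // va == 0xffff_8000_0000_8000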

/*
/// The kernel page table instance.
///
/// It manages the kernel mapping of all address spaces by sharing the kernel
/// part, and it is unlikely to be activated.
pub static KERNEL_PAGE_TABLE: Once<PageTable<KernelPtConfig>> = Once::new();
*/

#[derive(Clone, Debug)]
pub(crate) struct KernelPtConfig {}

/*
// We use the first available PTE bit to mark the frame as tracked.
// SAFETY: `item_into_raw` and `item_from_raw` are implemented correctly.
unsafe impl PageTableConfig for KernelPtConfig {
    open spec fn TOP_LEVEL_INDEX_RANGE_spec() -> Range<usize> {
        256..512
    }

    fn TOP_LEVEL_INDEX_RANGE() -> (r: Range<usize>)
        ensures
            r == Self::TOP_LEVEL_INDEX_RANGE_spec(),
    {
        256..512
    }

    fn TOP_LEVEL_CAN_UNMAP() -> (b: bool)
        ensures
            b == Self::TOP_LEVEL_CAN_UNMAP_spec(),
    {
        false
    }

    type E = PageTableEntry;
    type C = PagingConsts;

    type Item = MappedItem;

    fn item_into_raw(item: Self::Item) -> (Paddr, PagingLevel, PageProperty) {
        match item {
            MappedItem::Tracked(frame, mut prop) => {
                debug_assert!(!prop.flags.contains(PageFlags::AVAIL1));
                prop.flags |= PageFlags::AVAIL1;
                let level = frame.map_level();
                let paddr = frame.into_raw();
                (paddr, level, prop)
            }
            MappedItem::Untracked(pa, level, mut prop) => {
                debug_assert!(!prop.flags.contains(PageFlags::AVAIL1));
                prop.flags -= PageFlags::AVAIL1;
                (pa, level, prop)
            }
        }
    }

    unsafe fn item_from_raw(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self::Item {
        if prop.flags.contains(PageFlags::AVAIL1) {
            debug_assert_eq!(level, 1);
            // SAFETY: The caller ensures safety.
            let frame = unsafe { Frame::<dyn AnyFrameMeta>::from_raw(paddr) };
            MappedItem::Tracked(frame, prop)
        } else {
            MappedItem::Untracked(paddr, level, prop)
        }
    }
}
*/
} // verus!
/*
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) enum MappedItem {
    Tracked(Frame<dyn AnyFrameMeta>, PageProperty),
    Untracked(Paddr, PagingLevel, PageProperty),
}
*/
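
// Encoding sketch (mirrors the commented-out `PageTableConfig` impl above;
// hypothetical until that impl is re-enabled): the `AVAIL1` software bit in
// `PageProperty` is what distinguishes a tracked frame from an untracked
// range, so decoding inverts encoding as long as callers never set `AVAIL1`
// themselves.
//
//     let raw = KernelPtConfig::item_into_raw(MappedItem::Untracked(pa, level, prop));
//     // `AVAIL1` stays clear, so decoding yields `Untracked` again.
//     let item = unsafe { KernelPtConfig::item_from_raw(raw.0, raw.1, raw.2) };
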
// /// Initializes the kernel page table.
// ///
// /// This function should be called after:
// ///  - the page allocator and the heap allocator are initialized;
// ///  - the memory regions are initialized.
// ///
// /// This function should be called before:
// ///  - any initializer that modifies the kernel page table.
// pub fn init_kernel_page_table(meta_pages: Segment<MetaPageMeta>) {
//     info!("Initializing the kernel page table");
//     // Start to initialize the kernel page table.
//     let kpt = PageTable::<KernelPtConfig>::new_kernel_page_table();
//     let preempt_guard = disable_preempt();
//     // In LoongArch64, we don't need to do linear mappings for the kernel because of DMW0.
//     #[cfg(not(target_arch = "loongarch64"))]
//     // Do linear mappings for the kernel.
//     {
//         let max_paddr = crate::mm::frame::max_paddr();
//         let from = LINEAR_MAPPING_BASE_VADDR..LINEAR_MAPPING_BASE_VADDR + max_paddr;
//         let prop = PageProperty {
//             flags: PageFlags::RW,
//             cache: CachePolicy::Writeback,
//             priv_flags: PrivilegedPageFlags::GLOBAL,
//         };
//         let mut cursor = kpt.cursor_mut(&preempt_guard, &from).unwrap();
//         for (pa, level) in largest_pages::<KernelPtConfig>(from.start, 0, max_paddr) {
//             // SAFETY: we are doing the linear mapping for the kernel.
//             unsafe { cursor.map(MappedItem::Untracked(pa, level, prop)) }
//                 .expect("Kernel linear address space is mapped twice");
//         }
//     }
//     // Map the metadata pages.
//     {
//         let start_va = mapping::frame_to_meta::<PagingConsts>(0);
//         let from = start_va..start_va + meta_pages.size();
//         let prop = PageProperty {
//             flags: PageFlags::RW,
//             cache: CachePolicy::Writeback,
//             priv_flags: PrivilegedPageFlags::GLOBAL,
//         };
//         let mut cursor = kpt.cursor_mut(&preempt_guard, &from).unwrap();
//         // We use untracked mapping so that we can benefit from huge pages.
//         // We won't unmap them anyway, so there's no leaking problem yet.
//         // TODO: support tracked huge page mapping.
//         let pa_range = meta_pages.into_raw();
//         for (pa, level) in
//             largest_pages::<KernelPtConfig>(from.start, pa_range.start, pa_range.len())
//         {
//             // SAFETY: We are doing the metadata mappings for the kernel.
//             unsafe { cursor.map(MappedItem::Untracked(pa, level, prop)) }
//                 .expect("Frame metadata address space is mapped twice");
//         }
//     }
//     // In LoongArch64, we don't need to do linear mappings for the kernel code because of DMW0.
//     #[cfg(not(target_arch = "loongarch64"))]
//     // Map for the kernel code itself.
//     // TODO: set separate permissions for each segment in the kernel.
//     {
//         let regions = &crate::boot::EARLY_INFO.get().unwrap().memory_regions;
//         let region = regions
//             .iter()
//             .find(|r| r.typ() == MemoryRegionType::Kernel)
//             .unwrap();
//         let offset = kernel_loaded_offset();
//         let from = region.base() + offset..region.end() + offset;
//         let prop = PageProperty {
//             flags: PageFlags::RWX,
//             cache: CachePolicy::Writeback,
//             priv_flags: PrivilegedPageFlags::GLOBAL,
//         };
//         let mut cursor = kpt.cursor_mut(&preempt_guard, &from).unwrap();
//         for (pa, level) in largest_pages::<KernelPtConfig>(from.start, region.base(), from.len()) {
//             // SAFETY: we are doing the kernel code mapping.
//             unsafe { cursor.map(MappedItem::Untracked(pa, level, prop)) }
//                 .expect("Kernel code mapped twice");
//         }
//     }
//     KERNEL_PAGE_TABLE.call_once(|| kpt);
// }

// /// Activates the kernel page table.
// ///
// /// # Safety
// ///
// /// This function should only be called once per CPU.
// pub unsafe fn activate_kernel_page_table() {
//     let kpt = KERNEL_PAGE_TABLE
//         .get()
//         .expect("The kernel page table is not initialized yet");
//     // SAFETY: the kernel page table is initialized properly.
//     unsafe {
//         kpt.first_activate_unchecked();
//         crate::arch::mm::tlb_flush_all_including_global();
//     }
//     // SAFETY: the boot page table is OK to be dismissed now since
//     // the kernel page table is activated just now.
//     unsafe {
//         crate::mm::page_table::boot_pt::dismiss();
//     }
// }
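
// Boot-time ordering sketch (hypothetical caller; applies once the
// commented-out functions above are re-enabled): the kernel page table is
// built once after the allocators and memory regions are initialized, and is
// then activated on each CPU.
//
//     init_kernel_page_table(meta_pages);
//     // ... later, once per CPU:
//     unsafe { activate_kernel_page_table() };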