// ostd/mm/kspace/mod.rs

1// SPDX-License-Identifier: MPL-2.0
2//! Kernel memory space management.
3//!
4//! The kernel memory space is currently managed as follows, if the
5//! address width is 48 bits (with 47 bits kernel space).
6//!
//! TODO: the cap of the linear mapping (the start of vm alloc) is raised
//! to work around high IO in TDX. We need an actual vm alloc API to have
//! a proper fix.
10//!
11//! ```text
12//! +-+ <- the highest used address (0xffff_ffff_ffff_0000)
13//! | |         For the kernel code, 1 GiB.
14//! +-+ <- 0xffff_ffff_8000_0000
15//! | |
16//! | |         Unused hole.
17//! +-+ <- 0xffff_e100_0000_0000
18//! | |         For frame metadata, 1 TiB.
19//! +-+ <- 0xffff_e000_0000_0000
20//! | |         For [`KVirtArea`], 32 TiB.
21//! +-+ <- the middle of the higher half (0xffff_c000_0000_0000)
22//! | |
23//! | |
24//! | |
25//! | |         For linear mappings, 64 TiB.
26//! | |         Mapped physical addresses are untracked.
27//! | |
28//! | |
29//! | |
30//! +-+ <- the base of high canonical address (0xffff_8000_0000_0000)
31//! ```
32//!
//! If the address width is (according to [`crate::arch::mm::PagingConsts`])
//! 39 bits or 57 bits, the memory space is simply adjusted proportionally.
35//pub(crate) mod kvirt_area;
36use vstd::prelude::*;
37
38use core::ops::Range;
39
40//use log::info;
41//use spin::Once;
42#[cfg(ktest)]
43mod test;
44
45use super::{
46    frame::{
47        meta::{mapping, AnyFrameMeta, MetaPageMeta},
48        Frame, Segment,
49    },
50    page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
51    page_table::{PageTable, PageTableConfig},
52    Paddr, PagingConstsTrait, Vaddr,
53};
54use crate::{
55    boot::memory_region::MemoryRegionType,
56    mm::{largest_pages, PagingLevel},
57    specs::arch::{PageTableEntry, PagingConsts},
58    //task::disable_preempt,
59};
60
61verus! {
62
/// The shortest supported address width is 39 bits. And the literal
/// values are written for 48 bits address width. Adjust the values
/// by arithmetic left shift.
pub const ADDR_WIDTH_SHIFT: isize = 48 - 48;

/// Start of the kernel address space.
/// This is the _lowest_ address of the x86-64's _high_ canonical addresses.
#[cfg(not(target_arch = "loongarch64"))]
pub const KERNEL_BASE_VADDR: Vaddr = 0xffff_8000_0000_0000 << ADDR_WIDTH_SHIFT;

/// Start of the kernel address space on LoongArch64, which uses a different
/// high-half base than x86-64/RISC-V.
// NOTE(review): presumably this is the DMW-mapped window (the commented-out
// init code below mentions DMW0) — confirm.
#[cfg(target_arch = "loongarch64")]
pub const KERNEL_BASE_VADDR: Vaddr = 0x9000_0000_0000_0000 << ADDR_WIDTH_SHIFT;

/// End of the kernel address space (non inclusive).
pub const KERNEL_END_VADDR: Vaddr = 0xffff_ffff_ffff_0000 << ADDR_WIDTH_SHIFT;

/*
/// The kernel code is linear mapped to this address.
///
/// FIXME: This offset should be randomly chosen by the loader or the
/// boot compatibility layer. But we disabled it because OSTD
/// doesn't support relocatable kernel yet.
pub fn kernel_loaded_offset() -> usize {
    KERNEL_CODE_BASE_VADDR
}*/

/// Base virtual address at which the kernel code itself is mapped (x86-64).
#[cfg(target_arch = "x86_64")]
pub const KERNEL_CODE_BASE_VADDR: usize = 0xffff_ffff_8000_0000 << ADDR_WIDTH_SHIFT;

/// Base virtual address at which the kernel code itself is mapped (RISC-V 64).
#[cfg(target_arch = "riscv64")]
pub const KERNEL_CODE_BASE_VADDR: usize = 0xffff_ffff_0000_0000 << ADDR_WIDTH_SHIFT;

/// Base virtual address at which the kernel code itself is mapped (LoongArch64).
#[cfg(target_arch = "loongarch64")]
pub const KERNEL_CODE_BASE_VADDR: usize = 0x9000_0000_0000_0000 << ADDR_WIDTH_SHIFT;

/// Exclusive upper bound of the frame-metadata mapping area.
// NOTE(review): the module-level diagram places frame metadata in
// `0xffff_e000_0000_0000..0xffff_e100_0000_0000` (1 TiB), but these
// constants describe a 2 GiB area based at `0xffff_fff0_0000_0000` —
// confirm which reflects the current layout and update the diagram.
pub const FRAME_METADATA_CAP_VADDR: Vaddr = 0xffff_fff0_8000_0000 << ADDR_WIDTH_SHIFT;

/// Base virtual address of the frame-metadata mapping area.
pub const FRAME_METADATA_BASE_VADDR: Vaddr = 0xffff_fff0_0000_0000 << ADDR_WIDTH_SHIFT;

/// Base virtual address of the kernel virtual-allocation ([`KVirtArea`]) area.
pub const VMALLOC_BASE_VADDR: Vaddr = 0xffff_c000_0000_0000 << ADDR_WIDTH_SHIFT;

/// Virtual address range usable for kernel virtual allocation; it runs from
/// the vm-alloc base up to where the frame-metadata area begins.
pub const VMALLOC_VADDR_RANGE: Range<Vaddr> = VMALLOC_BASE_VADDR..FRAME_METADATA_BASE_VADDR;

/// The base address of the linear mapping of all physical
/// memory in the kernel address space.
pub const LINEAR_MAPPING_BASE_VADDR: Vaddr = 0xffff_8000_0000_0000 << ADDR_WIDTH_SHIFT;

/// Virtual address range of the linear mapping; it ends where the
/// vm-alloc area begins.
pub const LINEAR_MAPPING_VADDR_RANGE: Range<Vaddr> = LINEAR_MAPPING_BASE_VADDR..VMALLOC_BASE_VADDR;

/*
#[cfg(not(target_arch = "loongarch64"))]
pub const LINEAR_MAPPING_BASE_VADDR: Vaddr = 0xffff_8000_0000_0000 << ADDR_WIDTH_SHIFT;
#[cfg(target_arch = "loongarch64")]
pub const LINEAR_MAPPING_BASE_VADDR: Vaddr = 0x9000_0000_0000_0000 << ADDR_WIDTH_SHIFT;
pub const LINEAR_MAPPING_VADDR_RANGE: Range<Vaddr> = LINEAR_MAPPING_BASE_VADDR..VMALLOC_BASE_VADDR;
*/
119
/// Convert physical address to virtual address using offset, only available inside `ostd`.
///
/// Spec-level definition of [`paddr_to_vaddr`]: a physical address maps into
/// the kernel's linear-mapping area at the fixed offset
/// `LINEAR_MAPPING_BASE_VADDR`.
pub open spec fn paddr_to_vaddr_spec(pa: Paddr) -> usize {
    (pa + LINEAR_MAPPING_BASE_VADDR) as usize
}
124
/// Converts a physical address to its kernel virtual address in the
/// linear-mapping area.
///
/// The `requires` clause rules out arithmetic overflow of
/// `pa + LINEAR_MAPPING_BASE_VADDR`; the `returns` clause ties the exec
/// result to [`paddr_to_vaddr_spec`]. The `when_used_as_spec` attribute
/// makes Verus interpret calls to this function in spec contexts as calls
/// to the spec function.
#[verifier::when_used_as_spec(paddr_to_vaddr_spec)]
pub fn paddr_to_vaddr(pa: Paddr) -> usize
    requires
        pa + LINEAR_MAPPING_BASE_VADDR < usize::MAX,
    returns
        paddr_to_vaddr_spec(pa),
{
    //debug_assert!(pa < VMALLOC_BASE_VADDR - LINEAR_MAPPING_BASE_VADDR);
    pa + LINEAR_MAPPING_BASE_VADDR
}
135
136/*
137/// The kernel page table instance.
138///
139/// It manages the kernel mapping of all address spaces by sharing the kernel part. And it
140/// is unlikely to be activated.
141pub static KERNEL_PAGE_TABLE: Once<PageTable<KernelPtConfig>> = Once::new();
142*/
143
/// Page-table configuration marker type for the kernel address space.
///
/// NOTE(review): the `unsafe impl PageTableConfig for KernelPtConfig` is
/// currently commented out below; the type itself is kept so code that
/// names it (e.g. `PageTable<KernelPtConfig>`) still compiles.
#[derive(Clone, Debug)]
pub(crate) struct KernelPtConfig {}
146
147/*
148// We use the first available PTE bit to mark the frame as tracked.
149// SAFETY: `item_into_raw` and `item_from_raw` are implemented correctly,
150unsafe impl PageTableConfig for KernelPtConfig {
151    open spec fn TOP_LEVEL_INDEX_RANGE_spec() -> Range<usize> {
152        256..512
153    }
154
155    fn TOP_LEVEL_INDEX_RANGE() -> (r: Range<usize>)
156        ensures
157            r == Self::TOP_LEVEL_INDEX_RANGE_spec(),
158    {
159        256..512
160    }
161
162    fn TOP_LEVEL_CAN_UNMAP() -> (b: bool)
163        ensures
164            b == Self::TOP_LEVEL_CAN_UNMAP_spec(),
165    {
166        false
167    }
168
169    type E = PageTableEntry;
170    type C = PagingConsts;
171
172    type Item = MappedItem;
173
174    fn item_into_raw(item: Self::Item) -> (Paddr, PagingLevel, PageProperty) {
175        match item {
176            MappedItem::Tracked(frame, mut prop) => {
177                debug_assert!(!prop.flags.contains(PageFlags::AVAIL1));
178                prop.flags |= PageFlags::AVAIL1;
179                let level = frame.map_level();
180                let paddr = frame.into_raw();
181                (paddr, level, prop)
182            }
183            MappedItem::Untracked(pa, level, mut prop) => {
184                debug_assert!(!prop.flags.contains(PageFlags::AVAIL1));
185                prop.flags -= PageFlags::AVAIL1;
186                (pa, level, prop)
187            }
188        }
189    }
190
191    unsafe fn item_from_raw(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self::Item {
192        if prop.flags.contains(PageFlags::AVAIL1) {
193            debug_assert_eq!(level, 1);
194            // SAFETY: The caller ensures safety.
195            let frame = unsafe { Frame::<dyn AnyFrameMeta>::from_raw(paddr) };
196            MappedItem::Tracked(frame, prop)
197        } else {
198            MappedItem::Untracked(paddr, level, prop)
199        }
200    }
201}
202*/
203} // verus!
204/*
205#[derive(Clone, Debug, PartialEq, Eq)]
206pub(crate) enum MappedItem {
207    Tracked(Frame<dyn AnyFrameMeta>, PageProperty),
208    Untracked(Paddr, PagingLevel, PageProperty),
209}
210*/
211// /// Initializes the kernel page table.
212// ///
213// /// This function should be called after:
214// ///  - the page allocator and the heap allocator are initialized;
215// ///  - the memory regions are initialized.
216// ///
217// /// This function should be called before:
218// ///  - any initializer that modifies the kernel page table.
219// pub fn init_kernel_page_table(meta_pages: Segment<MetaPageMeta>) {
220//     info!("Initializing the kernel page table");
221//     // Start to initialize the kernel page table.
222//     let kpt = PageTable::<KernelPtConfig>::new_kernel_page_table();
223//     let preempt_guard = disable_preempt();
224//     // In LoongArch64, we don't need to do linear mappings for the kernel because of DMW0.
225//     #[cfg(not(target_arch = "loongarch64"))]
226//     // Do linear mappings for the kernel.
227//     {
228//         let max_paddr = crate::mm::frame::max_paddr();
229//         let from = LINEAR_MAPPING_BASE_VADDR..LINEAR_MAPPING_BASE_VADDR + max_paddr;
230//         let prop = PageProperty {
231//             flags: PageFlags::RW,
232//             cache: CachePolicy::Writeback,
233//             priv_flags: PrivilegedPageFlags::GLOBAL,
234//         };
235//         let mut cursor = kpt.cursor_mut(&preempt_guard, &from).unwrap();
236//         for (pa, level) in largest_pages::<KernelPtConfig>(from.start, 0, max_paddr) {
237//             // SAFETY: we are doing the linear mapping for the kernel.
238//             unsafe { cursor.map(MappedItem::Untracked(pa, level, prop)) }
239//                 .expect("Kernel linear address space is mapped twice");
240//         }
241//     }
242//     // Map the metadata pages.
243//     {
244//         let start_va = mapping::frame_to_meta::<PagingConsts>(0);
245//         let from = start_va..start_va + meta_pages.size();
246//         let prop = PageProperty {
247//             flags: PageFlags::RW,
248//             cache: CachePolicy::Writeback,
249//             priv_flags: PrivilegedPageFlags::GLOBAL,
250//         };
251//         let mut cursor = kpt.cursor_mut(&preempt_guard, &from).unwrap();
252//         // We use untracked mapping so that we can benefit from huge pages.
253//         // We won't unmap them anyway, so there's no leaking problem yet.
254//         // TODO: support tracked huge page mapping.
255//         let pa_range = meta_pages.into_raw();
256//         for (pa, level) in
257//             largest_pages::<KernelPtConfig>(from.start, pa_range.start, pa_range.len())
258//         {
259//             // SAFETY: We are doing the metadata mappings for the kernel.
260//             unsafe { cursor.map(MappedItem::Untracked(pa, level, prop)) }
261//                 .expect("Frame metadata address space is mapped twice");
262//         }
263//     }
264//     // In LoongArch64, we don't need to do linear mappings for the kernel code because of DMW0.
265//     #[cfg(not(target_arch = "loongarch64"))]
266//     // Map for the kernel code itself.
267//     // TODO: set separated permissions for each segments in the kernel.
268//     {
269//         let regions = &crate::boot::EARLY_INFO.get().unwrap().memory_regions;
270//         let region = regions
271//             .iter()
272//             .find(|r| r.typ() == MemoryRegionType::Kernel)
273//             .unwrap();
274//         let offset = kernel_loaded_offset();
275//         let from = region.base() + offset..region.end() + offset;
276//         let prop = PageProperty {
277//             flags: PageFlags::RWX,
278//             cache: CachePolicy::Writeback,
279//             priv_flags: PrivilegedPageFlags::GLOBAL,
280//         };
281//         let mut cursor = kpt.cursor_mut(&preempt_guard, &from).unwrap();
282//         for (pa, level) in largest_pages::<KernelPtConfig>(from.start, region.base(), from.len()) {
283//             // SAFETY: we are doing the kernel code mapping.
284//             unsafe { cursor.map(MappedItem::Untracked(pa, level, prop)) }
285//                 .expect("Kernel code mapped twice");
286//         }
287//     }
288//     KERNEL_PAGE_TABLE.call_once(|| kpt);
289// }
290// /// Activates the kernel page table.
291// ///
292// /// # Safety
293// ///
294// /// This function should only be called once per CPU.
295// pub unsafe fn activate_kernel_page_table() {
296//     let kpt = KERNEL_PAGE_TABLE
297//         .get()
298//         .expect("The kernel page table is not initialized yet");
299//     // SAFETY: the kernel page table is initialized properly.
300//     unsafe {
301//         kpt.first_activate_unchecked();
302//         crate::arch::mm::tlb_flush_all_including_global();
303//     }
304//     // SAFETY: the boot page table is OK to be dismissed now since
305//     // the kernel page table is activated just now.
306//     unsafe {
307//         crate::mm::page_table::boot_pt::dismiss();
308//     }
309// }