#![cfg_attr(target_arch = "loongarch64", expect(unused_imports))]

//! Kernel memory space management.
//!
//! This module defines the layout of the kernel address space, including the
//! linear mapping of physical memory, the kernel virtual memory allocation
//! (vmalloc) area, the frame metadata area, and the kernel code area, and it
//! builds the kernel page table that maps them.

pub(crate) mod kvirt_area;

use core::ops::Range;

use log::info;
use spin::Once;

#[cfg(ktest)]
mod test;

use super::{
    Frame, HasSize, Paddr, PagingConstsTrait, Vaddr,
    frame::{
        Segment,
        meta::{AnyFrameMeta, MetaPageMeta, mapping},
    },
    page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
    page_table::{PageTable, PageTableConfig},
};
use crate::{
    arch::mm::{PageTableEntry, PagingConsts},
    boot::memory_region::MemoryRegionType,
    const_assert,
    mm::{PAGE_SIZE, PagingLevel, page_table::largest_pages},
    task::disable_preempt,
};

const_assert!(PagingConsts::ADDRESS_WIDTH >= 39);
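/// The shift from the canonical 39-bit address layout, for which the
/// constants below are written, to the architecture's actual virtual
/// address width.
///
/// For example, with a 48-bit address width the shift is 9, which scales the
/// 39-bit kernel base `0xffff_ffc0_0000_0000` up to `0xffff_8000_0000_0000`
/// (the left shift wraps modulo 2^64).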
const ADDR_WIDTH_SHIFT: usize = PagingConsts::ADDRESS_WIDTH - 39;

/// The base address of the kernel address space.
#[cfg(not(target_arch = "loongarch64"))]
pub const KERNEL_BASE_VADDR: Vaddr = 0xffff_ffc0_0000_0000 << ADDR_WIDTH_SHIFT;
#[cfg(target_arch = "loongarch64")]
pub const KERNEL_BASE_VADDR: Vaddr = 0x9000_0000_0000_0000;
/// The end address (exclusive) of the kernel address space.
pub const KERNEL_END_VADDR: Vaddr = 0xffff_ffff_ffff_0000;

/// The maximum virtual address of user space (exclusive).
pub const MAX_USERSPACE_VADDR: Vaddr = (0x0000_0040_0000_0000 << ADDR_WIDTH_SHIFT) - PAGE_SIZE;

/// The virtual address range of the kernel address space.
pub const KERNEL_VADDR_RANGE: Range<Vaddr> = KERNEL_BASE_VADDR..KERNEL_END_VADDR;

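/// Gets the offset of the kernel image's virtual address from its physical
/// address, i.e., `vaddr = paddr + kernel_loaded_offset()` for addresses
/// within the kernel code mapping.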
pub fn kernel_loaded_offset() -> usize {
    KERNEL_CODE_BASE_VADDR
}

#[cfg(target_arch = "x86_64")]
const KERNEL_CODE_BASE_VADDR: usize = 0xffff_ffff_8000_0000;
#[cfg(target_arch = "riscv64")]
const KERNEL_CODE_BASE_VADDR: usize = 0xffff_ffff_0000_0000;
#[cfg(target_arch = "loongarch64")]
const KERNEL_CODE_BASE_VADDR: usize = 0x9000_0000_0000_0000;

const FRAME_METADATA_CAP_VADDR: Vaddr = 0xffff_fff0_8000_0000 << ADDR_WIDTH_SHIFT;
const FRAME_METADATA_BASE_VADDR: Vaddr = 0xffff_fff0_0000_0000 << ADDR_WIDTH_SHIFT;
/// The virtual address range to which the frame metadata pages are mapped.
pub(in crate::mm) const FRAME_METADATA_RANGE: Range<Vaddr> =
    FRAME_METADATA_BASE_VADDR..FRAME_METADATA_CAP_VADDR;

const VMALLOC_BASE_VADDR: Vaddr = 0xffff_ffe0_0000_0000 << ADDR_WIDTH_SHIFT;
/// The virtual address range for kernel virtual memory allocation.
pub const VMALLOC_VADDR_RANGE: Range<Vaddr> = VMALLOC_BASE_VADDR..FRAME_METADATA_BASE_VADDR;

/// The base address of the linear mapping of all physical memory in the
/// kernel address space.
#[cfg(not(target_arch = "loongarch64"))]
pub const LINEAR_MAPPING_BASE_VADDR: Vaddr = 0xffff_ffc0_0000_0000 << ADDR_WIDTH_SHIFT;
#[cfg(target_arch = "loongarch64")]
pub const LINEAR_MAPPING_BASE_VADDR: Vaddr = 0x9000_0000_0000_0000;
/// The virtual address range of the linear mapping of physical memory.
pub const LINEAR_MAPPING_VADDR_RANGE: Range<Vaddr> = LINEAR_MAPPING_BASE_VADDR..VMALLOC_BASE_VADDR;

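/// Converts a physical address to its virtual address in the linear mapping.
///
/// A minimal usage sketch (the physical address `0x8000` is a hypothetical
/// example; it must be backed by the linear mapping):
///
/// ```ignore
/// let va = paddr_to_vaddr(0x8000);
/// assert_eq!(va, LINEAR_MAPPING_BASE_VADDR + 0x8000);
/// ```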
pub fn paddr_to_vaddr(pa: Paddr) -> usize {
    // The physical address must be within the range covered by the linear
    // mapping, which spans `LINEAR_MAPPING_VADDR_RANGE`.
    debug_assert!(pa < VMALLOC_BASE_VADDR - LINEAR_MAPPING_BASE_VADDR);
    pa + LINEAR_MAPPING_BASE_VADDR
}

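/// The kernel page table, shared as the kernel half of all address spaces.
///
/// It is initialized by [`init_kernel_page_table`]. A minimal usage sketch
/// (`get` returns `None` before initialization):
///
/// ```ignore
/// let kpt = KERNEL_PAGE_TABLE.get().expect("kernel page table not initialized");
/// ```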
pub static KERNEL_PAGE_TABLE: Once<PageTable<KernelPtConfig>> = Once::new();

/// The configuration of the kernel page table.
#[derive(Clone, Debug)]
pub(crate) struct KernelPtConfig {}

// SAFETY: The `item_into_raw` and `item_from_raw` implementations below form
// a consistent round trip: the `AVAIL1` software bit records whether a raw
// mapping came from a tracked frame, so every raw tuple produced by
// `item_into_raw` is restored to an equivalent item by `item_from_raw`.
unsafe impl PageTableConfig for KernelPtConfig {
    // The kernel occupies the upper half of each address space.
    const TOP_LEVEL_INDEX_RANGE: Range<usize> = 256..512;
    const TOP_LEVEL_CAN_UNMAP: bool = false;

    type E = PageTableEntry;
    type C = PagingConsts;

    type Item = MappedItem;

    fn item_into_raw(item: Self::Item) -> (Paddr, PagingLevel, PageProperty) {
        match item {
            MappedItem::Tracked(frame, mut prop) => {
                debug_assert!(!prop.priv_flags.contains(PrivilegedPageFlags::AVAIL1));
                // Mark the mapping as tracked with the `AVAIL1` software bit.
                prop.priv_flags |= PrivilegedPageFlags::AVAIL1;
                let level = frame.map_level();
                let paddr = frame.into_raw();
                (paddr, level, prop)
            }
            MappedItem::Untracked(pa, level, mut prop) => {
                debug_assert!(!prop.priv_flags.contains(PrivilegedPageFlags::AVAIL1));
                // Defensively clear the software bit (a no-op whenever the
                // assertion above holds).
                prop.priv_flags -= PrivilegedPageFlags::AVAIL1;
                (pa, level, prop)
            }
        }
    }

    unsafe fn item_from_raw(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self::Item {
        if prop.priv_flags.contains(PrivilegedPageFlags::AVAIL1) {
            // Tracked frames are only mapped at the base page level.
            debug_assert_eq!(level, 1);
            // SAFETY: The caller ensures that the raw tuple was previously
            // produced by `item_into_raw`, so `paddr` refers to a forgotten
            // tracked frame whose ownership we take back here.
            let frame = unsafe { Frame::<dyn AnyFrameMeta>::from_raw(paddr) };
            MappedItem::Tracked(frame, prop)
        } else {
            MappedItem::Untracked(paddr, level, prop)
        }
    }
}

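/// An item mapped in the kernel page table.
///
/// A sketch of the raw round trip through [`KernelPtConfig`] (the physical
/// address `0x8000` and the page property are hypothetical examples):
///
/// ```ignore
/// let prop = PageProperty {
///     flags: PageFlags::RW,
///     cache: CachePolicy::Writeback,
///     priv_flags: PrivilegedPageFlags::GLOBAL,
/// };
/// let item = MappedItem::Untracked(0x8000, 1, prop);
/// let (pa, level, raw_prop) = KernelPtConfig::item_into_raw(item.clone());
/// // SAFETY: the raw tuple was just produced by `item_into_raw`.
/// let restored = unsafe { KernelPtConfig::item_from_raw(pa, level, raw_prop) };
/// assert_eq!(item, restored);
/// ```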
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) enum MappedItem {
    /// A base page mapped to a metadata-tracked frame.
    Tracked(Frame<dyn AnyFrameMeta>, PageProperty),
    /// A page mapped to a raw physical address without tracked metadata.
    Untracked(Paddr, PagingLevel, PageProperty),
}

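/// Initializes the kernel page table.
///
/// It sets up three groups of mappings:
///  1. the linear mapping of all physical memory (except on loongarch64,
///     where the hardware direct-mapping window serves this purpose);
///  2. the mapping of the frame metadata pages;
///  3. the mapping of the kernel code itself (except on loongarch64).
///
/// This function should be called only once, after the frame metadata
/// (`meta_pages`) has been initialized.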
pub fn init_kernel_page_table(meta_pages: Segment<MetaPageMeta>) {
    info!("Initializing the kernel page table");

    let kpt = PageTable::<KernelPtConfig>::new_kernel_page_table();
    let preempt_guard = disable_preempt();

    // Do the linear mappings of physical memory.
    #[cfg(not(target_arch = "loongarch64"))]
    {
        let max_paddr = crate::mm::frame::max_paddr();
        let from = LINEAR_MAPPING_BASE_VADDR..LINEAR_MAPPING_BASE_VADDR + max_paddr;
        let prop = PageProperty {
            flags: PageFlags::RW,
            cache: CachePolicy::Writeback,
            priv_flags: PrivilegedPageFlags::GLOBAL,
        };
        let mut cursor = kpt.cursor_mut(&preempt_guard, &from).unwrap();
        for (pa, level) in largest_pages::<KernelPtConfig>(from.start, 0, max_paddr) {
            // SAFETY: We are doing the untracked linear mapping for the kernel.
            unsafe { cursor.map(MappedItem::Untracked(pa, level, prop)) }
                .expect("Kernel linear address space is mapped twice");
        }
    }

    // Map the frame metadata pages.
    {
        let start_va = mapping::frame_to_meta::<PagingConsts>(0);
        let from = start_va..start_va + meta_pages.size();
        let prop = PageProperty {
            flags: PageFlags::RW,
            cache: CachePolicy::Writeback,
            priv_flags: PrivilegedPageFlags::GLOBAL,
        };
        let mut cursor = kpt.cursor_mut(&preempt_guard, &from).unwrap();
        // The metadata pages are mapped untracked, so leak the segment into
        // its raw physical range.
        let pa_range = meta_pages.into_raw();
        for (pa, level) in
            largest_pages::<KernelPtConfig>(from.start, pa_range.start, pa_range.len())
        {
            // SAFETY: We are doing the untracked mapping of the frame
            // metadata for the kernel.
            unsafe { cursor.map(MappedItem::Untracked(pa, level, prop)) }
                .expect("Frame metadata address space is mapped twice");
        }
    }

    // Map the kernel code itself.
    #[cfg(not(target_arch = "loongarch64"))]
    {
        let regions = &crate::boot::EARLY_INFO.get().unwrap().memory_regions;
        let region = regions
            .iter()
            .find(|r| r.typ() == MemoryRegionType::Kernel)
            .unwrap();
        let offset = kernel_loaded_offset();
        let from = region.base() + offset..region.end() + offset;
        let prop = PageProperty {
            flags: PageFlags::RWX,
            cache: CachePolicy::Writeback,
            priv_flags: PrivilegedPageFlags::GLOBAL,
        };
        let mut cursor = kpt.cursor_mut(&preempt_guard, &from).unwrap();
        for (pa, level) in largest_pages::<KernelPtConfig>(from.start, region.base(), from.len()) {
            // SAFETY: We are doing the untracked mapping of the kernel code.
            unsafe { cursor.map(MappedItem::Untracked(pa, level, prop)) }
                .expect("Kernel code mapped twice");
        }
    }

    KERNEL_PAGE_TABLE.call_once(|| kpt);
}

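/// Activates the kernel page table on the current CPU.
///
/// # Safety
///
/// The caller must ensure that the kernel page table is initialized and that
/// it maps all memory that the current CPU is using, as the previous page
/// table's mappings are abandoned after the switch.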
pub unsafe fn activate_kernel_page_table() {
    let kpt = KERNEL_PAGE_TABLE
        .get()
        .expect("The kernel page table is not initialized yet");
    // SAFETY: The caller upholds the contract of this function, so activating
    // the kernel page table and flushing the TLB (including global entries)
    // is safe here.
    unsafe {
        kpt.first_activate_unchecked();
        crate::arch::mm::tlb_flush_all_including_global();
    }
}