1#![cfg_attr(target_arch = "loongarch64", expect(unused_imports))]
38
39pub(crate) mod kvirt_area;
40
41use core::ops::Range;
42
43use log::info;
44use spin::Once;
45#[cfg(ktest)]
46mod test;
47
48use super::{
49 Frame, HasSize, Paddr, PagingConstsTrait, Vaddr,
50 frame::{
51 Segment,
52 meta::{AnyFrameMeta, MetaPageMeta, mapping},
53 },
54 page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
55 page_table::{PageTable, PageTableConfig},
56};
57use crate::{
58 arch::mm::{PageTableEntry, PagingConsts},
59 boot::memory_region::MemoryRegionType,
60 const_assert,
61 mm::{HasPaddr, PAGE_SIZE, PagingLevel, frame::FrameRef, page_table::largest_pages},
62 task::disable_preempt,
63};
64
// The layout below is written in terms of a minimum 39-bit virtual address
// width; wider address widths scale the base addresses via `ADDR_WIDTH_SHIFT`.
const_assert!(PagingConsts::ADDRESS_WIDTH >= 39);
// Left shift applied to the 39-bit base addresses to widen them to the actual
// address width. The sign-extension bits shifted out of the top are simply
// discarded, which yields the corresponding canonical address for the wider
// width.
const ADDR_WIDTH_SHIFT: usize = PagingConsts::ADDRESS_WIDTH - 39;
70
/// The lowest virtual address of the kernel address space.
#[cfg(not(target_arch = "loongarch64"))]
pub const KERNEL_BASE_VADDR: Vaddr = 0xffff_ffc0_0000_0000 << ADDR_WIDTH_SHIFT;
/// The lowest virtual address of the kernel address space.
///
/// NOTE(review): on loongarch64 this looks like the base of the hardware
/// direct-mapped window rather than a paging-derived address — confirm.
#[cfg(target_arch = "loongarch64")]
pub const KERNEL_BASE_VADDR: Vaddr = 0x9000_0000_0000_0000;
/// The exclusive upper bound of the kernel virtual address space.
pub const KERNEL_END_VADDR: Vaddr = 0xffff_ffff_ffff_0000;

/// The highest userspace virtual address boundary: the lower-half limit of the
/// address space minus one page.
pub const MAX_USERSPACE_VADDR: Vaddr = (0x0000_0040_0000_0000 << ADDR_WIDTH_SHIFT) - PAGE_SIZE;

/// The virtual address range of the kernel address space.
pub const KERNEL_VADDR_RANGE: Range<Vaddr> = KERNEL_BASE_VADDR..KERNEL_END_VADDR;
95
/// Returns the offset between the kernel image's virtual and physical
/// addresses.
///
/// `init_kernel_page_table` adds this offset to the kernel memory region's
/// physical range to obtain the virtual range at which the image is mapped.
pub fn kernel_loaded_offset() -> usize {
    KERNEL_CODE_BASE_VADDR
}
104
// The per-architecture virtual base address at which the kernel code is
// mapped (used as the virtual-to-physical offset of the kernel image).
#[cfg(target_arch = "x86_64")]
const KERNEL_CODE_BASE_VADDR: usize = 0xffff_ffff_8000_0000;
#[cfg(target_arch = "riscv64")]
const KERNEL_CODE_BASE_VADDR: usize = 0xffff_ffff_0000_0000;
// NOTE(review): on loongarch64 this equals `KERNEL_BASE_VADDR`, i.e. the
// direct-mapped window base — confirm that the image is reached through it.
#[cfg(target_arch = "loongarch64")]
const KERNEL_CODE_BASE_VADDR: usize = 0x9000_0000_0000_0000;
111
/// The exclusive end of the frame-metadata mapping region.
const FRAME_METADATA_CAP_VADDR: Vaddr = 0xffff_fff0_8000_0000 << ADDR_WIDTH_SHIFT;
/// The base of the frame-metadata mapping region.
const FRAME_METADATA_BASE_VADDR: Vaddr = 0xffff_fff0_0000_0000 << ADDR_WIDTH_SHIFT;
/// The virtual region where per-frame metadata pages are mapped
/// (populated via `mapping::frame_to_meta` in `init_kernel_page_table`).
pub(in crate::mm) const FRAME_METADATA_RANGE: Range<Vaddr> =
    FRAME_METADATA_BASE_VADDR..FRAME_METADATA_CAP_VADDR;

/// The base of the kernel dynamic ("vmalloc") virtual address region.
const VMALLOC_BASE_VADDR: Vaddr = 0xffff_ffe0_0000_0000 << ADDR_WIDTH_SHIFT;
/// The vmalloc region; it ends where the frame-metadata region begins.
pub const VMALLOC_VADDR_RANGE: Range<Vaddr> = VMALLOC_BASE_VADDR..FRAME_METADATA_BASE_VADDR;
119
/// The base of the linear (direct) mapping of physical memory: physical
/// address `pa` is reachable at `LINEAR_MAPPING_BASE_VADDR + pa`
/// (see [`paddr_to_vaddr`]).
#[cfg(not(target_arch = "loongarch64"))]
pub const LINEAR_MAPPING_BASE_VADDR: Vaddr = 0xffff_ffc0_0000_0000 << ADDR_WIDTH_SHIFT;
/// The base of the linear mapping on loongarch64.
///
/// NOTE(review): this coincides with the hardware direct-mapped window base;
/// `init_kernel_page_table` skips building a software linear mapping on this
/// architecture — confirm the window covers all physical memory.
#[cfg(target_arch = "loongarch64")]
pub const LINEAR_MAPPING_BASE_VADDR: Vaddr = 0x9000_0000_0000_0000;
/// The linear-mapping region, extending up to the vmalloc base.
pub const LINEAR_MAPPING_VADDR_RANGE: Range<Vaddr> = LINEAR_MAPPING_BASE_VADDR..VMALLOC_BASE_VADDR;
127
128pub fn paddr_to_vaddr(pa: Paddr) -> usize {
130 debug_assert!(pa < VMALLOC_BASE_VADDR - LINEAR_MAPPING_BASE_VADDR);
131 pa + LINEAR_MAPPING_BASE_VADDR
132}
133
/// The kernel page table, created and populated once by
/// [`init_kernel_page_table`].
pub(super) static KERNEL_PAGE_TABLE: Once<PageTable<KernelPtConfig>> = Once::new();

/// The [`PageTableConfig`] used for the kernel page table.
#[derive(Clone, Debug)]
pub(super) struct KernelPtConfig {}
142
143unsafe impl PageTableConfig for KernelPtConfig {
148 const TOP_LEVEL_INDEX_RANGE: Range<usize> = 256..512;
149 const TOP_LEVEL_CAN_UNMAP: bool = false;
150
151 type E = PageTableEntry;
152 type C = PagingConsts;
153
154 type Item = MappedItem;
155 type ItemRef<'a> = MappedItemRef<'a>;
156
157 fn item_raw_info(item: &Self::Item) -> (Paddr, PagingLevel, PageProperty) {
158 match *item {
159 MappedItem::Tracked(ref frame, mut prop) => {
160 debug_assert!(!prop.priv_flags.contains(PrivilegedPageFlags::AVAIL1));
161 prop.priv_flags |= PrivilegedPageFlags::AVAIL1;
162 let level = frame.map_level();
163 let paddr = frame.paddr();
164 (paddr, level, prop)
165 }
166 MappedItem::Untracked(ref pa, ref level, mut prop) => {
167 debug_assert!(!prop.priv_flags.contains(PrivilegedPageFlags::AVAIL1));
168 prop.priv_flags -= PrivilegedPageFlags::AVAIL1;
169 (*pa, *level, prop)
170 }
171 }
172 }
173
174 unsafe fn item_from_raw(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self::Item {
175 if prop.priv_flags.contains(PrivilegedPageFlags::AVAIL1) {
176 debug_assert_eq!(level, 1);
177 let frame = unsafe { Frame::<dyn AnyFrameMeta>::from_raw(paddr) };
179 MappedItem::Tracked(frame, prop)
180 } else {
181 MappedItem::Untracked(paddr, level, prop)
182 }
183 }
184
185 unsafe fn item_ref_from_raw<'a>(
186 paddr: Paddr,
187 level: PagingLevel,
188 prop: PageProperty,
189 ) -> Self::ItemRef<'a> {
190 if prop.priv_flags.contains(PrivilegedPageFlags::AVAIL1) {
191 debug_assert_eq!(level, 1);
192 let frame = unsafe { FrameRef::<dyn AnyFrameMeta>::borrow_paddr(paddr) };
195 MappedItemRef::Tracked(frame, prop)
196 } else {
197 MappedItemRef::Untracked(paddr, level, prop)
198 }
199 }
200}
201
/// An owned item mapped in the kernel page table.
#[derive(Clone, Debug, PartialEq, Eq)]
pub(super) enum MappedItem {
    /// A metadata-tracked frame and its mapping property. Tracked frames are
    /// only mapped at the base paging level (see the `debug_assert_eq!` in
    /// `KernelPtConfig::item_from_raw`).
    Tracked(Frame<dyn AnyFrameMeta>, PageProperty),
    /// An untracked raw mapping: `(physical address, paging level, property)`.
    Untracked(Paddr, PagingLevel, PageProperty),
}

/// A borrowed view of a [`MappedItem`].
#[derive(Debug)]
pub(crate) enum MappedItemRef<'a> {
    /// A borrowed tracked frame and its mapping property.
    #[cfg_attr(not(ktest), expect(dead_code))]
    Tracked(FrameRef<'a, dyn AnyFrameMeta>, PageProperty),
    /// An untracked raw mapping: `(physical address, paging level, property)`.
    #[cfg_attr(not(ktest), expect(dead_code))]
    Untracked(Paddr, PagingLevel, PageProperty),
}
215
/// Initializes the kernel page table and publishes it in
/// [`KERNEL_PAGE_TABLE`].
///
/// Three regions are populated:
///  1. the linear mapping of all physical memory (skipped on loongarch64),
///  2. the frame-metadata pages (`meta_pages`),
///  3. the kernel image itself (skipped on loongarch64).
pub fn init_kernel_page_table(meta_pages: Segment<MetaPageMeta>) {
    info!("Initializing the kernel page table");

    let kpt = PageTable::<KernelPtConfig>::new_kernel_page_table();
    // Keep preemption disabled while the mapping cursors below are held.
    let preempt_guard = disable_preempt();

    // 1. Linear mapping: map physical `[0, max_paddr)` as untracked RW pages
    //    at `LINEAR_MAPPING_BASE_VADDR`.
    //    NOTE(review): on loongarch64 this is skipped — presumably the
    //    hardware direct-mapped window serves the same purpose; confirm.
    #[cfg(not(target_arch = "loongarch64"))]
    {
        let max_paddr = crate::mm::frame::max_paddr();
        let from = LINEAR_MAPPING_BASE_VADDR..LINEAR_MAPPING_BASE_VADDR + max_paddr;
        let prop = PageProperty {
            flags: PageFlags::RW,
            cache: CachePolicy::Writeback,
            priv_flags: PrivilegedPageFlags::GLOBAL,
        };
        let mut cursor = kpt.cursor_mut(&preempt_guard, &from).unwrap();
        // `largest_pages` yields `(paddr, level)` pairs, choosing the biggest
        // page sizes permitted by the range's alignment.
        for (pa, level) in largest_pages::<KernelPtConfig>(from.start, 0, max_paddr) {
            // SAFETY: mapping untracked physical memory into the region
            // reserved for the linear mapping of this new table —
            // NOTE(review): confirm against the cursor's `map` contract.
            unsafe { cursor.map(MappedItem::Untracked(pa, level, prop)) };
        }
    }

    // 2. Map the frame-metadata pages at the address computed by
    //    `mapping::frame_to_meta`.
    {
        let start_va = mapping::frame_to_meta::<PagingConsts>(0);
        let from = start_va..start_va + meta_pages.size();
        let prop = PageProperty {
            flags: PageFlags::RW,
            cache: CachePolicy::Writeback,
            priv_flags: PrivilegedPageFlags::GLOBAL,
        };
        let mut cursor = kpt.cursor_mut(&preempt_guard, &from).unwrap();
        // `into_raw` presumably relinquishes ownership of the segment and
        // yields its raw physical range — confirm its exact semantics.
        let pa_range = meta_pages.into_raw();
        for (pa, level) in
            largest_pages::<KernelPtConfig>(from.start, pa_range.start, pa_range.len())
        {
            // SAFETY: same rationale as the linear mapping above; the
            // metadata range is mapped as untracked memory.
            unsafe { cursor.map(MappedItem::Untracked(pa, level, prop)) };
        }
    }

    // 3. Map the kernel image: the `Kernel` memory region's physical range,
    //    shifted by `kernel_loaded_offset()`, mapped RWX.
    //    NOTE(review): no W^X split is applied here — confirm whether finer
    //    protection is established elsewhere.
    #[cfg(not(target_arch = "loongarch64"))]
    {
        let regions = &crate::boot::EARLY_INFO.get().unwrap().memory_regions;
        let region = regions
            .iter()
            .find(|r| r.typ() == MemoryRegionType::Kernel)
            .unwrap();
        let offset = kernel_loaded_offset();
        let from = region.base() + offset..region.end() + offset;
        let prop = PageProperty {
            flags: PageFlags::RWX,
            cache: CachePolicy::Writeback,
            priv_flags: PrivilegedPageFlags::GLOBAL,
        };
        let mut cursor = kpt.cursor_mut(&preempt_guard, &from).unwrap();
        for (pa, level) in largest_pages::<KernelPtConfig>(from.start, region.base(), from.len()) {
            // SAFETY: same rationale as above — untracked mapping of the
            // kernel image's physical range.
            unsafe { cursor.map(MappedItem::Untracked(pa, level, prop)) };
        }
    }

    KERNEL_PAGE_TABLE.call_once(|| kpt);
}
297
/// Activates the kernel page table on the current CPU and flushes the TLB.
///
/// # Panics
///
/// Panics if [`init_kernel_page_table`] has not been called yet.
///
/// # Safety
///
/// NOTE(review): `first_activate_unchecked` suggests this must be the first
/// activation of the table on this CPU and that the caller must guarantee the
/// table fully covers all memory in use — confirm the exact contract at its
/// definition.
pub unsafe fn activate_kernel_page_table() {
    let kpt = KERNEL_PAGE_TABLE
        .get()
        .expect("The kernel page table is not initialized yet");
    // SAFETY: delegated to this function's caller (see `# Safety`). The
    // global TLB flush afterwards discards stale translations left over from
    // the previously active table.
    unsafe {
        kpt.first_activate_unchecked();
        crate::arch::mm::tlb_flush_all_including_global();
    }
}