// ostd/mm/vm_space.rs
1// SPDX-License-Identifier: MPL-2.0
2
3//! Virtual memory space management.
4//!
5//! The [`VmSpace`] struct is provided to manage the virtual memory space of a
//! user. Cursors are used to traverse and modify the virtual memory space
7//! concurrently. The VM space cursor [`self::Cursor`] is just a wrapper over
8//! the page table cursor, providing efficient, powerful concurrent accesses
9//! to the page table.
10
11use core::{ops::Range, sync::atomic::Ordering};
12
13use super::{AnyUFrameMeta, PagingLevel, page_table::PageTableConfig};
14use crate::{
15 Error,
16 arch::mm::{PageTableEntry, PagingConsts, current_page_table_paddr},
17 cpu::{AtomicCpuSet, CpuSet, PinCurrentCpu},
18 cpu_local_cell,
19 io::IoMem,
20 mm::{
21 Frame, HasPaddr, MAX_USERSPACE_VADDR, PAGE_SIZE, PageProperty, PrivilegedPageFlags, UFrame,
22 VmReader, VmWriter,
23 frame::FrameRef,
24 io::Fallible,
25 kspace::KERNEL_PAGE_TABLE,
26 page_prop::{CachePolicy, PageFlags},
27 page_table::{self, PageTable, PageTableFrag},
28 tlb::{TlbFlushOp, TlbFlusher},
29 },
30 prelude::*,
31 sync::{RcuDrop, SpinLock},
32 task::{DisabledPreemptGuard, atomic_mode::AsAtomicModeGuard, disable_preempt},
33};
34
/// A virtual address space for user-mode tasks, enabling safe manipulation of user-space memory.
///
/// The `VmSpace` type provides memory isolation guarantees between user-space and
/// kernel-space. For example, given an arbitrary user-space pointer, one can read and
/// write the memory location referred to by the user-space pointer without the risk of
/// breaking the memory safety of the kernel space.
///
/// # Task Association Semantics
///
/// As far as OSTD is concerned, a `VmSpace` is not necessarily associated with a task. Once a
/// `VmSpace` is activated (see [`VmSpace::activate`]), it remains activated until another
/// `VmSpace` is activated **possibly by another task running on the same CPU**.
///
/// This means that it's up to the kernel to ensure that a task's `VmSpace` is always activated
/// while the task is running. This can be done by using the injected post schedule handler
/// (see [`inject_post_schedule_handler`]) to always activate the correct `VmSpace` after each
/// context switch.
///
/// If the kernel otherwise decides not to ensure that the running task's `VmSpace` is always
/// activated, the kernel must deal with race conditions when calling methods that require the
/// `VmSpace` to be activated, e.g., [`UserMode::execute`], [`VmSpace::reader`],
/// [`VmSpace::writer`]. Otherwise, the behavior is unspecified, though it's guaranteed _not_ to
/// compromise the kernel's memory safety.
///
/// # Memory Backing
///
/// A newly-created `VmSpace` is not backed by any physical memory pages. To
/// provide memory pages for a `VmSpace`, one can allocate and map physical
/// memory ([`UFrame`]s) to the `VmSpace` using the cursor.
///
/// A `VmSpace` can also attach a page fault handler, which will be invoked to
/// handle page faults generated from user space.
///
/// [`inject_post_schedule_handler`]: crate::task::inject_post_schedule_handler
/// [`UserMode::execute`]: crate::user::UserMode::execute
#[derive(Debug)]
pub struct VmSpace {
    /// The user-mode page table backing this address space.
    pt: PageTable<UserPtConfig>,
    /// The set of CPUs on which this `VmSpace` is (possibly) activated.
    cpus: AtomicCpuSet,
    /// All `IoMem` regions ever mapped into this space. Entries are kept
    /// alive until the `VmSpace` is dropped, even if the corresponding
    /// mappings are unmapped (see [`CursorMut::map_iomem`]).
    iomems: SpinLock<Vec<IoMem>>,
}
76
impl VmSpace {
    /// Creates a new VM address space.
    ///
    /// The space is derived from the kernel page table and initially contains
    /// no user mappings; it is not activated on any CPU.
    pub fn new() -> Self {
        Self {
            pt: KERNEL_PAGE_TABLE.get().unwrap().create_user_page_table(),
            cpus: AtomicCpuSet::new(CpuSet::new_empty()),
            iomems: SpinLock::new(Vec::new()),
        }
    }

    /// Gets an immutable cursor in the virtual address range.
    ///
    /// The cursor behaves like a lock guard, exclusively owning a sub-tree of
    /// the page table, preventing others from creating a cursor in it. So be
    /// sure to drop the cursor as soon as possible.
    ///
    /// The creation of the cursor may block if another cursor having an
    /// overlapping range is alive.
    pub fn cursor<'a, G: AsAtomicModeGuard>(
        &'a self,
        guard: &'a G,
        va: &Range<Vaddr>,
    ) -> Result<Cursor<'a>> {
        Ok(Cursor(self.pt.cursor(guard, va)?))
    }

    /// Gets a mutable cursor in the virtual address range.
    ///
    /// The same as [`Self::cursor`], the cursor behaves like a lock guard,
    /// exclusively owning a sub-tree of the page table, preventing others
    /// from creating a cursor in it. So be sure to drop the cursor as soon as
    /// possible.
    ///
    /// The creation of the cursor may block if another cursor having an
    /// overlapping range is alive. The modification to the mapping by the
    /// cursor may also block, or be overridden by, the modification of
    /// another cursor.
    pub fn cursor_mut<'a, G: AsAtomicModeGuard>(
        &'a self,
        guard: &'a G,
        va: &Range<Vaddr>,
    ) -> Result<CursorMut<'a>> {
        Ok(CursorMut {
            pt_cursor: self.pt.cursor_mut(guard, va)?,
            flusher: TlbFlusher::new(&self.cpus, disable_preempt()),
            vmspace: self,
        })
    }

    /// Activates the page table on the current CPU.
    pub fn activate(self: &Arc<Self>) {
        let preempt_guard = disable_preempt();
        let cpu = preempt_guard.current_cpu();

        let last_ptr = ACTIVATED_VM_SPACE.load();

        // Already activated on this CPU; nothing to do.
        if last_ptr == Arc::as_ptr(self) {
            return;
        }

        // Record ourselves in the CPU set and the activated VM space pointer.
        // `Acquire` to ensure the modification to the PT is visible by this CPU.
        self.cpus.add(cpu, Ordering::Acquire);

        // Leak one `Arc` reference into the CPU-local pointer so the
        // `VmSpace` stays alive while it is activated on this CPU. The
        // reference is reclaimed below (via `Arc::from_raw`) the next time a
        // different `VmSpace` is activated on the same CPU.
        let self_ptr = Arc::into_raw(Arc::clone(self)) as *mut VmSpace;
        ACTIVATED_VM_SPACE.store(self_ptr);

        if !last_ptr.is_null() {
            // SAFETY: The pointer is cast from an `Arc` when it's activated
            // the last time, so it can be restored and only restored once.
            let last = unsafe { Arc::from_raw(last_ptr) };
            last.cpus.remove(cpu, Ordering::Relaxed);
        }

        self.pt.activate();
    }

    /// Creates a reader to read data from the user space of the current task.
    ///
    /// Returns `Err` if this `VmSpace` doesn't belong to the user space of the current task
    /// or the `vaddr` and `len` do not represent a user space memory range.
    ///
    /// Users must ensure that no other page table is activated in the current task during the
    /// lifetime of the created `VmReader`. This guarantees that the `VmReader` can operate correctly.
    pub fn reader(&self, vaddr: Vaddr, len: usize) -> Result<VmReader<'_, Fallible>> {
        // This `VmSpace` must be the one currently activated on this CPU.
        if current_page_table_paddr() != self.pt.root_paddr() {
            return Err(Error::AccessDenied);
        }

        // `saturating_add` avoids an overflow bypassing the range check.
        if vaddr.saturating_add(len) > MAX_USERSPACE_VADDR {
            return Err(Error::AccessDenied);
        }

        // SAFETY: The memory range is in user space, as checked above.
        Ok(unsafe { VmReader::<Fallible>::from_user_space(vaddr as *const u8, len) })
    }

    /// Creates a writer to write data into the user space.
    ///
    /// Returns `Err` if this `VmSpace` doesn't belong to the user space of the current task
    /// or the `vaddr` and `len` do not represent a user space memory range.
    ///
    /// Users must ensure that no other page table is activated in the current task during the
    /// lifetime of the created `VmWriter`. This guarantees that the `VmWriter` can operate correctly.
    pub fn writer(&self, vaddr: Vaddr, len: usize) -> Result<VmWriter<'_, Fallible>> {
        // This `VmSpace` must be the one currently activated on this CPU.
        if current_page_table_paddr() != self.pt.root_paddr() {
            return Err(Error::AccessDenied);
        }

        // `saturating_add` avoids an overflow bypassing the range check.
        if vaddr.saturating_add(len) > MAX_USERSPACE_VADDR {
            return Err(Error::AccessDenied);
        }

        // `VmWriter` is neither `Sync` nor `Send`, so it will not live longer than the current
        // task. This ensures that the correct page table is activated during the usage period of
        // the `VmWriter`.
        //
        // SAFETY: The memory range is in user space, as checked above.
        Ok(unsafe { VmWriter::<Fallible>::from_user_space(vaddr as *mut u8, len) })
    }

    /// Creates a reader/writer pair to read data from and write data into the user space.
    ///
    /// Returns `Err` if this `VmSpace` doesn't belong to the user space of the current task
    /// or the `vaddr` and `len` do not represent a user space memory range.
    ///
    /// Users must ensure that no other page table is activated in the current task during the
    /// lifetime of the created `VmReader` and `VmWriter`. This guarantees that the `VmReader`
    /// and the `VmWriter` can operate correctly.
    ///
    /// This method is semantically equivalent to calling [`Self::reader`] and [`Self::writer`]
    /// separately, but it avoids double checking the validity of the memory region.
    pub fn reader_writer(
        &self,
        vaddr: Vaddr,
        len: usize,
    ) -> Result<(VmReader<'_, Fallible>, VmWriter<'_, Fallible>)> {
        // This `VmSpace` must be the one currently activated on this CPU.
        if current_page_table_paddr() != self.pt.root_paddr() {
            return Err(Error::AccessDenied);
        }

        // `saturating_add` avoids an overflow bypassing the range check.
        if vaddr.saturating_add(len) > MAX_USERSPACE_VADDR {
            return Err(Error::AccessDenied);
        }

        // SAFETY: The memory range is in user space, as checked above.
        let reader = unsafe { VmReader::<Fallible>::from_user_space(vaddr as *const u8, len) };

        // `VmWriter` is neither `Sync` nor `Send`, so it will not live longer than the current
        // task. This ensures that the correct page table is activated during the usage period of
        // the `VmWriter`.
        //
        // SAFETY: The memory range is in user space, as checked above.
        let writer = unsafe { VmWriter::<Fallible>::from_user_space(vaddr as *mut u8, len) };

        Ok((reader, writer))
    }
}
234
235impl Default for VmSpace {
236 fn default() -> Self {
237 Self::new()
238 }
239}
240
241impl VmSpace {
242 /// Finds the [`IoMem`] that contains the given physical address.
243 ///
244 /// It is a private method for internal use only. Please refer to
245 /// [`CursorMut::find_iomem_by_paddr`] for more details.
246 fn find_iomem_by_paddr(&self, paddr: Paddr) -> Option<(IoMem, usize)> {
247 let iomems = self.iomems.lock();
248 for iomem in iomems.iter() {
249 let start = iomem.paddr();
250 let end = start + iomem.size();
251 if paddr >= start && paddr < end {
252 let offset = paddr - start;
253 return Some((iomem.clone(), offset));
254 }
255 }
256 None
257 }
258}
259
/// The cursor for querying over the VM space without modifying it.
///
/// It exclusively owns a sub-tree of the page table, preventing others from
/// reading or modifying the same sub-tree. Two read-only cursors can not be
/// created from the same virtual address range either.
//
// A thin newtype over the page table cursor; all methods delegate to it.
pub struct Cursor<'a>(page_table::Cursor<'a, UserPtConfig>);
266
267impl Cursor<'_> {
268 /// Queries the mapping at the current virtual address.
269 ///
270 /// If the cursor is pointing to a valid virtual address that is locked,
271 /// it will return the virtual address range and the mapped item.
272 pub fn query(&mut self) -> Result<(Range<Vaddr>, Option<VmQueriedItem<'_>>)> {
273 let (range, item) = self.0.query()?;
274 Ok((range, item.map(VmQueriedItem::from)))
275 }
276
277 /// Moves the cursor forward to the next mapped virtual address.
278 ///
279 /// If there is mapped virtual address following the current address within
280 /// next `len` bytes, it will return that mapped address. In this case,
281 /// the cursor will stop at the mapped address.
282 ///
283 /// Otherwise, it will return `None`. And the cursor may stop at any
284 /// address after `len` bytes.
285 ///
286 /// # Panics
287 ///
288 /// Panics if the length is longer than the remaining range of the cursor.
289 pub fn find_next(&mut self, len: usize) -> Option<Vaddr> {
290 self.0.find_next(len)
291 }
292
293 /// Jumps to the virtual address.
294 pub fn jump(&mut self, va: Vaddr) -> Result<()> {
295 self.0.jump(va)?;
296 Ok(())
297 }
298
299 /// Gets the virtual address of the current slot.
300 pub fn virt_addr(&self) -> Vaddr {
301 self.0.virt_addr()
302 }
303}
304
/// The cursor for modifying the mappings in VM space.
///
/// It exclusively owns a sub-tree of the page table, preventing others from
/// reading or modifying the same sub-tree.
pub struct CursorMut<'a> {
    /// The underlying page table cursor that performs the actual work.
    pt_cursor: page_table::CursorMut<'a, UserPtConfig>,
    // We have a read lock so the CPU set in the flusher is always a superset
    // of actual activated CPUs.
    flusher: TlbFlusher<'a, DisabledPreemptGuard>,
    // References to the `VmSpace`
    vmspace: &'a VmSpace,
}
317
impl<'a> CursorMut<'a> {
    /// Queries the mapping at the current virtual address.
    ///
    /// This is the same as [`Cursor::query`].
    ///
    /// If the cursor is pointing to a valid virtual address that is locked,
    /// it will return the virtual address range and the mapped item.
    pub fn query(&mut self) -> Result<(Range<Vaddr>, Option<VmQueriedItem<'_>>)> {
        let (range, item) = self.pt_cursor.query()?;
        Ok((range, item.map(VmQueriedItem::from)))
    }

    /// Moves the cursor forward to the next mapped virtual address.
    ///
    /// This is the same as [`Cursor::find_next`].
    pub fn find_next(&mut self, len: usize) -> Option<Vaddr> {
        self.pt_cursor.find_next(len)
    }

    /// Jumps to the virtual address.
    ///
    /// This is the same as [`Cursor::jump`].
    pub fn jump(&mut self, va: Vaddr) -> Result<()> {
        self.pt_cursor.jump(va)?;
        Ok(())
    }

    /// Gets the virtual address of the current slot.
    pub fn virt_addr(&self) -> Vaddr {
        self.pt_cursor.virt_addr()
    }

    /// Gets the dedicated TLB flusher for this cursor.
    pub fn flusher(&mut self) -> &mut TlbFlusher<'a, DisabledPreemptGuard> {
        &mut self.flusher
    }

    /// Maps a frame into the current slot.
    ///
    /// This method will bring the cursor to the next slot after the modification.
    ///
    /// # Panics
    ///
    /// Panics if the current virtual address is already mapped.
    pub fn map(&mut self, frame: UFrame, prop: PageProperty) {
        let item = VmItem::new_tracked(frame, prop);

        // SAFETY: It is safe to map untyped memory into the userspace.
        unsafe { self.pt_cursor.map(item) };
    }

    /// Maps a range of [`IoMem`] into the current slot.
    ///
    /// The memory region to be mapped is the [`IoMem`] range starting at
    /// `offset` and extending to `offset + len`, or to the end of [`IoMem`],
    /// whichever comes first. This method will bring the cursor to the next
    /// slot after the modification.
    ///
    /// # Limitations
    ///
    /// Once an instance of `IoMem` is mapped to a `VmSpace`,
    /// then the `IoMem` instance will only be dropped when the `VmSpace` is
    /// dropped, not when all the mappings backed by the `IoMem` are destroyed
    /// with the `unmap` method.
    ///
    /// # Panics
    ///
    /// Panics if
    /// - `len` or `offset` is not aligned to the page size;
    /// - the current virtual address is already mapped.
    pub fn map_iomem(&mut self, io_mem: IoMem, prop: PageProperty, len: usize, offset: usize) {
        assert_eq!(len % PAGE_SIZE, 0);
        assert_eq!(offset % PAGE_SIZE, 0);

        // Nothing to map if the offset is at or beyond the end of the I/O
        // memory.
        if offset >= io_mem.size() {
            return;
        }

        let paddr_begin = io_mem.paddr() + offset;
        // Clamp the end of the mapped range to the end of the `IoMem`.
        let paddr_end = if io_mem.size() - offset < len {
            io_mem.paddr() + io_mem.size()
        } else {
            io_mem.paddr() + len + offset
        };

        // Map the range one base page at a time.
        for current_paddr in (paddr_begin..paddr_end).step_by(PAGE_SIZE) {
            // SAFETY: It is safe to map I/O memory into the userspace.
            unsafe {
                self.pt_cursor
                    .map(VmItem::new_untracked_io(current_paddr, prop))
            };
        }

        // If the `iomems` list in `VmSpace` does not contain the current I/O
        // memory, push it to maintain the correct reference count.
        let mut iomems = self.vmspace.iomems.lock();
        if !iomems
            .iter()
            .any(|iomem| iomem.paddr() == io_mem.paddr() && iomem.size() == io_mem.size())
        {
            iomems.push(io_mem);
        }
    }

    /// Finds an [`IoMem`] that was previously mapped to by [`Self::map_iomem`] and contains the
    /// physical address.
    ///
    /// This method can recover the originally mapped `IoMem` from the physical address returned by
    /// [`Self::query`]. If the query returns a [`VmQueriedItem::MappedIoMem`], this method is
    /// guaranteed to succeed with the specific physical address. However, if the corresponding
    /// mapping is subsequently unmapped, it is unspecified whether this method will still succeed
    /// or not.
    ///
    /// On success, this method returns the `IoMem` and the offset from the `IoMem` start to the
    /// given physical address. Otherwise, this method returns `None`.
    pub fn find_iomem_by_paddr(&self, paddr: Paddr) -> Option<(IoMem, usize)> {
        self.vmspace.find_iomem_by_paddr(paddr)
    }

    /// Clears the mapping starting from the current slot,
    /// and returns the number of unmapped pages.
    ///
    /// This method will bring the cursor forward by `len` bytes in the virtual
    /// address space after the modification.
    ///
    /// Already-absent mappings encountered by the cursor will be skipped. It
    /// is valid to unmap a range that is not mapped.
    ///
    /// It must issue and dispatch a TLB flush after the operation. Otherwise,
    /// the memory safety will be compromised. Please call this function less
    /// to avoid the overhead of TLB flush. Using a large `len` is wiser than
    /// splitting the operation into multiple small ones.
    ///
    /// # Panics
    ///
    /// Panics if:
    /// - the length is longer than the remaining range of the cursor;
    /// - the length is not page-aligned.
    pub fn unmap(&mut self, len: usize) -> usize {
        let end_va = self.virt_addr() + len;
        let mut num_unmapped: usize = 0;
        loop {
            // SAFETY: It is safe to un-map memory in the userspace. And the
            // un-mapped items are dropped after TLB flushes.
            let Some(frag) = (unsafe { self.pt_cursor.take_next(end_va - self.virt_addr()) })
            else {
                break; // No more mappings in the range.
            };

            match frag {
                PageTableFrag::Mapped { va, item, .. } => {
                    // SAFETY: If the item is not a scalar (e.g., a frame
                    // pointer), we will drop it after the RCU grace period
                    // (see `issue_tlb_flush_with`).
                    let (item, panic_guard) = unsafe { RcuDrop::into_inner(item) };

                    match item {
                        VmItem {
                            mapped_item: MappedItem::TrackedFrame(old_frame),
                            ..
                        } => {
                            num_unmapped += 1;

                            // Defer dropping the frame until after the TLB
                            // flush, so no CPU can still access it through a
                            // stale TLB entry.
                            let rcu_frame = RcuDrop::new(old_frame);
                            panic_guard.forget();
                            let rcu_frame = Frame::rcu_from_unsized(rcu_frame);
                            self.flusher
                                .issue_tlb_flush_with(TlbFlushOp::for_single(va), rcu_frame);
                        }
                        VmItem {
                            mapped_item: MappedItem::UntrackedIoMem { .. },
                            ..
                        } => {
                            panic_guard.forget();

                            // Flush the TLB entry for the current address, but
                            // in the current design, we cannot drop the
                            // corresponding `IoMem`. This is because we manage
                            // the range of I/O as a whole, but the frames
                            // handled here might be one segment of it.
                            self.flusher.issue_tlb_flush(TlbFlushOp::for_single(va));
                        }
                    }
                }
                PageTableFrag::StrayPageTable {
                    pt,
                    va,
                    len,
                    num_frames,
                } => {
                    // A whole sub-tree was detached; all its frames count as
                    // unmapped, and the sub-tree itself is dropped after the
                    // range flush.
                    num_unmapped += num_frames;

                    self.flusher.issue_tlb_flush_with(
                        TlbFlushOp::for_range(va..va + len),
                        Frame::rcu_from_unsized(pt),
                    );
                }
            }
        }

        self.flusher.dispatch_tlb_flush();

        num_unmapped
    }

    /// Applies the operation to the next slot of mapping within the range.
    ///
    /// The range to be found in is the current virtual address with the
    /// provided length.
    ///
    /// The function stops and yields the actually protected range if it has
    /// actually protected a page, no matter if the following pages are also
    /// required to be protected.
    ///
    /// It also moves the cursor forward to the next page after the
    /// protected one. If no mapped pages exist in the following range, the
    /// cursor will stop at the end of the range and return [`None`].
    ///
    /// Note that it will **NOT** flush the TLB after the operation. Please
    /// make the decision yourself on when and how to flush the TLB using
    /// [`Self::flusher`].
    ///
    /// # Panics
    ///
    /// Panics if the length is longer than the remaining range of the cursor.
    pub fn protect_next(
        &mut self,
        len: usize,
        mut op: impl FnMut(&mut PageFlags, &mut CachePolicy),
    ) -> Option<Range<Vaddr>> {
        // SAFETY: It is safe to set `PageFlags` and `CachePolicy` of memory
        // in the userspace.
        unsafe {
            self.pt_cursor.protect_next(len, &mut |prop| {
                op(&mut prop.flags, &mut prop.cache);
            })
        }
    }
}
557
cpu_local_cell! {
    /// The `Arc` pointer to the activated VM space on this CPU. If the pointer
    /// is NULL, it means that the activated page table is merely the kernel
    /// page table.
    // The non-null pointer owns one `Arc` reference, leaked in
    // `VmSpace::activate` via `Arc::into_raw` and reclaimed there via
    // `Arc::from_raw` when another `VmSpace` is activated on this CPU.
    // TODO: If we are enabling ASID, we need to maintain the TLB state of each
    // CPU, rather than merely the activated `VmSpace`. When ASID is enabled,
    // the non-active `VmSpace`s can still have their TLB entries in the CPU!
    static ACTIVATED_VM_SPACE: *const VmSpace = core::ptr::null();
}
567
#[cfg(ktest)]
/// Returns the raw pointer to the `VmSpace` activated on the current CPU,
/// or null if only the kernel page table is activated (test-only helper).
pub(super) fn get_activated_vm_space() -> *const VmSpace {
    ACTIVATED_VM_SPACE.load()
}
572
/// The result of a query over the VM space.
///
/// The lifetime parameter ties the borrowed frame reference to the cursor
/// that produced this item.
pub enum VmQueriedItem<'a> {
    /// The current slot is mapped, the frame within is allocated from the
    /// physical memory.
    MappedRam {
        /// The mapped frame.
        frame: FrameRef<'a, dyn AnyUFrameMeta>,
        /// The property of the slot.
        prop: PageProperty,
    },
    /// The current slot is mapped, the frame within is allocated from the
    /// MMIO memory.
    MappedIoMem {
        /// The physical address of the corresponding I/O memory.
        paddr: Paddr,
        /// The property of the slot.
        prop: PageProperty,
    },
}
592
593impl VmQueriedItem<'_> {
594 /// Returns the page property of the mapped item.
595 pub fn prop(&self) -> &PageProperty {
596 match self {
597 Self::MappedRam { prop, .. } => prop,
598 Self::MappedIoMem { prop, .. } => prop,
599 }
600 }
601}
602
/// Internal representation of a VM item.
///
/// This is kept private to ensure memory safety. The public interface
/// should use `VmQueriedItem` for querying mapping information.
#[derive(Debug, Clone, PartialEq)]
pub(crate) struct VmItem {
    /// The property of the mapping.
    prop: PageProperty,
    /// The mapped target (an owned tracked frame or an untracked I/O range).
    mapped_item: MappedItem,
}
612
/// A reference to a VM item.
///
/// Unlike [`VmItem`], this borrows the mapped item rather than owning it.
#[derive(Debug)]
pub(crate) struct VmItemRef<'a> {
    /// The property of the mapping.
    prop: PageProperty,
    /// The borrowed mapped target.
    mapped_item: MappedItemRef<'a>,
}
619
#[derive(Debug, Clone, PartialEq)]
enum MappedItem {
    /// A reference-counted user frame tracked by the frame allocator.
    TrackedFrame(UFrame),
    /// An untracked page of I/O memory at `paddr`, mapped at `level`.
    UntrackedIoMem { paddr: Paddr, level: PagingLevel },
}
625
#[derive(Debug)]
enum MappedItemRef<'a> {
    /// A borrowed reference to a tracked user frame.
    TrackedFrame(FrameRef<'a, dyn AnyUFrameMeta>),
    /// An untracked page of I/O memory at `paddr`, mapped at `level`.
    UntrackedIoMem { paddr: Paddr, level: PagingLevel },
}
631
632impl VmItem {
633 /// Creates a new `VmItem` that maps a tracked frame.
634 pub(super) fn new_tracked(frame: UFrame, prop: PageProperty) -> Self {
635 Self {
636 prop,
637 mapped_item: MappedItem::TrackedFrame(frame),
638 }
639 }
640
641 /// Creates a new `VmItem` that maps an untracked I/O memory.
642 fn new_untracked_io(paddr: Paddr, prop: PageProperty) -> Self {
643 Self {
644 prop,
645 mapped_item: MappedItem::UntrackedIoMem { paddr, level: 1 },
646 }
647 }
648}
649
650impl<'a> From<VmItemRef<'a>> for VmQueriedItem<'a> {
651 fn from(item: VmItemRef<'a>) -> Self {
652 match item.mapped_item {
653 MappedItemRef::TrackedFrame(frame) => VmQueriedItem::MappedRam {
654 frame,
655 prop: item.prop,
656 },
657 MappedItemRef::UntrackedIoMem { paddr, level } => {
658 debug_assert_eq!(level, 1);
659 VmQueriedItem::MappedIoMem {
660 paddr,
661 prop: item.prop,
662 }
663 }
664 }
665 }
666}
667
/// The page table configuration for user address spaces (see [`VmSpace`]).
#[derive(Clone, Debug)]
pub(crate) struct UserPtConfig {}
670
// SAFETY: `item_raw_info`, `item_into_raw`, `item_from_raw`, and
// `item_ref_from_raw` are correctly implemented with respect to the `Item` and
// `ItemRef` types.
unsafe impl PageTableConfig for UserPtConfig {
    // Only the lower top-level entries are used for user mappings; the
    // remaining (kernel) half is excluded.
    const TOP_LEVEL_INDEX_RANGE: Range<usize> = 0..256;

    type E = PageTableEntry;
    type C = PagingConsts;

    type Item = VmItem;
    type ItemRef<'a> = VmItemRef<'a>;

    // Encoding scheme: the `AVAIL1` software flag in the page property tells
    // the two item kinds apart — set for untracked I/O memory, clear for
    // tracked frames. `item_raw_info` writes this encoding and the two
    // `*_from_raw` functions below read it back; they must stay in sync.
    fn item_raw_info(item: &Self::Item) -> (Paddr, PagingLevel, PageProperty) {
        match &item.mapped_item {
            MappedItem::TrackedFrame(frame) => {
                let mut prop = item.prop;
                prop.priv_flags -= PrivilegedPageFlags::AVAIL1; // Clear AVAIL1 for tracked frames
                let level = frame.map_level();
                let paddr = frame.paddr();
                (paddr, level, prop)
            }
            MappedItem::UntrackedIoMem { paddr, level } => {
                let mut prop = item.prop;
                prop.priv_flags |= PrivilegedPageFlags::AVAIL1; // Set AVAIL1 for I/O memory
                (*paddr, *level, prop)
            }
        }
    }

    unsafe fn item_from_raw(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self::Item {
        // User items are only mapped at the base paging level.
        debug_assert_eq!(level, 1);
        if prop.priv_flags.contains(PrivilegedPageFlags::AVAIL1) {
            // `AVAIL1` is set, this is I/O memory.
            VmItem::new_untracked_io(paddr, prop)
        } else {
            // `AVAIL1` is clear, this is tracked memory.
            // SAFETY: The caller ensures safety.
            let frame = unsafe { Frame::<dyn AnyUFrameMeta>::from_raw(paddr) };
            VmItem::new_tracked(frame, prop)
        }
    }

    unsafe fn item_ref_from_raw<'a>(
        paddr: Paddr,
        level: PagingLevel,
        prop: PageProperty,
    ) -> Self::ItemRef<'a> {
        // User items are only mapped at the base paging level.
        debug_assert_eq!(level, 1);
        if prop.priv_flags.contains(PrivilegedPageFlags::AVAIL1) {
            // `AVAIL1` is set, this is I/O memory.
            VmItemRef {
                prop,
                mapped_item: MappedItemRef::UntrackedIoMem { paddr, level },
            }
        } else {
            // `AVAIL1` is clear, this is tracked memory.
            // SAFETY: The caller ensures that the frame outlives `'a` and that
            // the type matches the frame.
            let frame_ref = unsafe { FrameRef::<dyn AnyUFrameMeta>::borrow_paddr(paddr) };
            VmItemRef {
                prop,
                mapped_item: MappedItemRef::TrackedFrame(frame_ref),
            }
        }
    }
}