// ostd/mm/vm_space.rs
// SPDX-License-Identifier: MPL-2.0

//! Virtual memory space management.
//!
//! The [`VmSpace`] struct is provided to manage the virtual memory space of a
//! user. Cursors are used to traverse over and modify the virtual memory space
//! concurrently. The VM space cursor [`self::Cursor`] is just a wrapper over
//! the page table cursor, providing efficient and powerful concurrent access
//! to the page table.

use core::{ops::Range, sync::atomic::Ordering};

use super::{AnyUFrameMeta, PagingLevel, page_table::PageTableConfig};
use crate::{
    Error,
    arch::mm::{PageTableEntry, PagingConsts, current_page_table_paddr},
    cpu::{AtomicCpuSet, CpuSet, PinCurrentCpu},
    cpu_local_cell,
    io::IoMem,
    mm::{
        Frame, HasPaddr, MAX_USERSPACE_VADDR, PAGE_SIZE, PageProperty, PrivilegedPageFlags, UFrame,
        VmReader, VmWriter,
        frame::FrameRef,
        io::Fallible,
        kspace::KERNEL_PAGE_TABLE,
        page_prop::{CachePolicy, PageFlags},
        page_table::{self, PageTable, PageTableFrag},
        tlb::{TlbFlushOp, TlbFlusher},
    },
    prelude::*,
    sync::{RcuDrop, SpinLock},
    task::{DisabledPreemptGuard, atomic_mode::AsAtomicModeGuard, disable_preempt},
};

/// A virtual address space for user-mode tasks, enabling safe manipulation of user-space memory.
///
/// The `VmSpace` type provides memory isolation guarantees between user-space and
/// kernel-space. For example, given an arbitrary user-space pointer, one can read and
/// write the memory location referred to by the user-space pointer without the risk of
/// breaking the memory safety of the kernel space.
///
/// # Task Association Semantics
///
/// As far as OSTD is concerned, a `VmSpace` is not necessarily associated with a task. Once a
/// `VmSpace` is activated (see [`VmSpace::activate`]), it remains activated until another
/// `VmSpace` is activated, **possibly by another task running on the same CPU**.
///
/// This means that it's up to the kernel to ensure that a task's `VmSpace` is always activated
/// while the task is running. This can be done by using the injected post-schedule handler
/// (see [`inject_post_schedule_handler`]) to always activate the correct `VmSpace` after each
/// context switch, as sketched below.
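///
/// For instance, a minimal sketch (not compiled here; `current_task_vm_space`
/// is a hypothetical kernel-side helper that looks up the `VmSpace` of the
/// task that was just scheduled in):
///
/// ```ignore
/// inject_post_schedule_handler(|| {
///     // Re-activate the scheduled task's `VmSpace`, if it has one.
///     if let Some(vm_space) = current_task_vm_space() {
///         vm_space.activate();
///     }
/// });
/// ```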
///
/// If the kernel otherwise decides not to ensure that the running task's `VmSpace` is always
/// activated, the kernel must deal with race conditions when calling methods that require the
/// `VmSpace` to be activated, e.g., [`UserMode::execute`], [`VmSpace::reader`],
/// [`VmSpace::writer`]. Otherwise, the behavior is unspecified, though it's guaranteed _not_ to
/// compromise the kernel's memory safety.
///
/// # Memory Backing
///
/// A newly-created `VmSpace` is not backed by any physical memory pages. To
/// provide memory pages for a `VmSpace`, one can allocate and map physical
/// memory ([`UFrame`]s) to the `VmSpace` using the cursor.
///
/// A `VmSpace` can also attach a page fault handler, which will be invoked to
/// handle page faults generated from user space.
///
/// [`inject_post_schedule_handler`]: crate::task::inject_post_schedule_handler
/// [`UserMode::execute`]: crate::user::UserMode::execute
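///
/// # Examples
///
/// A minimal sketch of backing one page of a `VmSpace` (not compiled here;
/// `alloc_uframe` and `user_rw_prop` are hypothetical helpers standing in
/// for the kernel's frame-allocation and page-property choices):
///
/// ```ignore
/// let vm_space = Arc::new(VmSpace::new());
/// vm_space.activate();
///
/// let preempt_guard = disable_preempt();
/// let va = 0x1_0000..0x1_0000 + PAGE_SIZE;
/// let mut cursor = vm_space.cursor_mut(&preempt_guard, &va)?;
/// cursor.map(alloc_uframe()?, user_rw_prop());
/// drop(cursor); // Drop the cursor promptly to release the sub-tree lock.
/// ```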
#[derive(Debug)]
pub struct VmSpace {
    pt: PageTable<UserPtConfig>,
    cpus: AtomicCpuSet,
    iomems: SpinLock<Vec<IoMem>>,
}

impl VmSpace {
    /// Creates a new VM address space.
    pub fn new() -> Self {
        Self {
            pt: KERNEL_PAGE_TABLE.get().unwrap().create_user_page_table(),
            cpus: AtomicCpuSet::new(CpuSet::new_empty()),
            iomems: SpinLock::new(Vec::new()),
        }
    }

    /// Gets an immutable cursor in the virtual address range.
    ///
    /// The cursor behaves like a lock guard, exclusively owning a sub-tree of
    /// the page table, preventing others from creating a cursor in it. So be
    /// sure to drop the cursor as soon as possible.
    ///
    /// The creation of the cursor may block if another cursor having an
    /// overlapping range is alive.
    pub fn cursor<'a, G: AsAtomicModeGuard>(
        &'a self,
        guard: &'a G,
        va: &Range<Vaddr>,
    ) -> Result<Cursor<'a>> {
        Ok(Cursor(self.pt.cursor(guard, va)?))
    }

    /// Gets a mutable cursor in the virtual address range.
    ///
    /// The same as [`Self::cursor`], the cursor behaves like a lock guard,
    /// exclusively owning a sub-tree of the page table, preventing others
    /// from creating a cursor in it. So be sure to drop the cursor as soon as
    /// possible.
    ///
    /// The creation of the cursor may block if another cursor having an
    /// overlapping range is alive. Modifications made through this cursor may
    /// also block, or be overridden by, those made through another cursor.
    pub fn cursor_mut<'a, G: AsAtomicModeGuard>(
        &'a self,
        guard: &'a G,
        va: &Range<Vaddr>,
    ) -> Result<CursorMut<'a>> {
        Ok(CursorMut {
            pt_cursor: self.pt.cursor_mut(guard, va)?,
            flusher: TlbFlusher::new(&self.cpus, disable_preempt()),
            vmspace: self,
        })
    }

    /// Activates the page table on the current CPU.
    pub fn activate(self: &Arc<Self>) {
        let preempt_guard = disable_preempt();
        let cpu = preempt_guard.current_cpu();

        let last_ptr = ACTIVATED_VM_SPACE.load();

        if last_ptr == Arc::as_ptr(self) {
            return;
        }

        // Record ourselves in the CPU set and the activated VM space pointer.
        // `Acquire` to ensure the modification to the PT is visible by this CPU.
        self.cpus.add(cpu, Ordering::Acquire);

        let self_ptr = Arc::into_raw(Arc::clone(self)) as *mut VmSpace;
        ACTIVATED_VM_SPACE.store(self_ptr);

        if !last_ptr.is_null() {
            // SAFETY: The pointer is cast from an `Arc` when it's activated
            // the last time, so it can be restored and only restored once.
            let last = unsafe { Arc::from_raw(last_ptr) };
            last.cpus.remove(cpu, Ordering::Relaxed);
        }

        self.pt.activate();
    }

    /// Creates a reader to read data from the user space of the current task.
    ///
    /// Returns `Err` if this `VmSpace` doesn't belong to the user space of the current task
    /// or the `vaddr` and `len` do not represent a user space memory range.
    ///
    /// Users must ensure that no other page table is activated in the current task during the
    /// lifetime of the created `VmReader`. This guarantees that the `VmReader` can operate correctly.
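    ///
    /// # Examples
    ///
    /// A minimal sketch (not compiled here; assumes this `VmSpace` has been
    /// activated on the current task and that `vaddr` points at a mapped `u64`):
    ///
    /// ```ignore
    /// vm_space.activate();
    /// let mut reader = vm_space.reader(vaddr, core::mem::size_of::<u64>())?;
    /// // Fallible reads return `Err` instead of faulting on unmapped memory.
    /// let value: u64 = reader.read_val()?;
    /// ```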
    pub fn reader(&self, vaddr: Vaddr, len: usize) -> Result<VmReader<'_, Fallible>> {
        if current_page_table_paddr() != self.pt.root_paddr() {
            return Err(Error::AccessDenied);
        }

        if vaddr.saturating_add(len) > MAX_USERSPACE_VADDR {
            return Err(Error::AccessDenied);
        }

        // SAFETY: The memory range is in user space, as checked above.
        Ok(unsafe { VmReader::<Fallible>::from_user_space(vaddr as *const u8, len) })
    }

    /// Creates a writer to write data into the user space.
    ///
    /// Returns `Err` if this `VmSpace` doesn't belong to the user space of the current task
    /// or the `vaddr` and `len` do not represent a user space memory range.
    ///
    /// Users must ensure that no other page table is activated in the current task during the
    /// lifetime of the created `VmWriter`. This guarantees that the `VmWriter` can operate correctly.
    pub fn writer(&self, vaddr: Vaddr, len: usize) -> Result<VmWriter<'_, Fallible>> {
        if current_page_table_paddr() != self.pt.root_paddr() {
            return Err(Error::AccessDenied);
        }

        if vaddr.saturating_add(len) > MAX_USERSPACE_VADDR {
            return Err(Error::AccessDenied);
        }

        // `VmWriter` is neither `Sync` nor `Send`, so it will not live longer than the current
        // task. This ensures that the correct page table is activated during the usage period of
        // the `VmWriter`.
        //
        // SAFETY: The memory range is in user space, as checked above.
        Ok(unsafe { VmWriter::<Fallible>::from_user_space(vaddr as *mut u8, len) })
    }

    /// Creates a reader/writer pair to read data from and write data into the user space.
    ///
    /// Returns `Err` if this `VmSpace` doesn't belong to the user space of the current task
    /// or the `vaddr` and `len` do not represent a user space memory range.
    ///
    /// Users must ensure that no other page table is activated in the current task during the
    /// lifetime of the created `VmReader` and `VmWriter`. This guarantees that the `VmReader`
    /// and the `VmWriter` can operate correctly.
    ///
    /// This method is semantically equivalent to calling [`Self::reader`] and [`Self::writer`]
    /// separately, but it avoids double-checking the validity of the memory region.
    pub fn reader_writer(
        &self,
        vaddr: Vaddr,
        len: usize,
    ) -> Result<(VmReader<'_, Fallible>, VmWriter<'_, Fallible>)> {
        if current_page_table_paddr() != self.pt.root_paddr() {
            return Err(Error::AccessDenied);
        }

        if vaddr.saturating_add(len) > MAX_USERSPACE_VADDR {
            return Err(Error::AccessDenied);
        }

        // SAFETY: The memory range is in user space, as checked above.
        let reader = unsafe { VmReader::<Fallible>::from_user_space(vaddr as *const u8, len) };

        // `VmWriter` is neither `Sync` nor `Send`, so it will not live longer than the current
        // task. This ensures that the correct page table is activated during the usage period of
        // the `VmWriter`.
        //
        // SAFETY: The memory range is in user space, as checked above.
        let writer = unsafe { VmWriter::<Fallible>::from_user_space(vaddr as *mut u8, len) };

        Ok((reader, writer))
    }
}

impl Default for VmSpace {
    fn default() -> Self {
        Self::new()
    }
}

impl VmSpace {
    /// Finds the [`IoMem`] that contains the given physical address.
    ///
    /// It is a private method for internal use only. Please refer to
    /// [`CursorMut::find_iomem_by_paddr`] for more details.
    fn find_iomem_by_paddr(&self, paddr: Paddr) -> Option<(IoMem, usize)> {
        let iomems = self.iomems.lock();
        for iomem in iomems.iter() {
            let start = iomem.paddr();
            let end = start + iomem.size();
            if paddr >= start && paddr < end {
                let offset = paddr - start;
                return Some((iomem.clone(), offset));
            }
        }
        None
    }
}

/// The cursor for querying over the VM space without modifying it.
///
/// It exclusively owns a sub-tree of the page table, preventing others from
/// reading or modifying the same sub-tree. Two read-only cursors cannot be
/// created from the same virtual address range either.
pub struct Cursor<'a>(page_table::Cursor<'a, UserPtConfig>);

impl Cursor<'_> {
    /// Queries the mapping at the current virtual address.
    ///
    /// If the cursor is pointing to a valid virtual address that is locked,
    /// it will return the virtual address range and the mapped item.
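    ///
    /// # Examples
    ///
    /// A sketch (`cursor` is a [`Cursor`] created via [`VmSpace::cursor`]):
    ///
    /// ```ignore
    /// let (range, item) = cursor.query()?;
    /// if let Some(VmQueriedItem::MappedRam { frame, prop }) = item {
    ///     // `range` covers the queried slot, `frame` borrows the backing
    ///     // frame, and `prop` carries the page flags and cache policy.
    /// }
    /// ```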
    pub fn query(&mut self) -> Result<(Range<Vaddr>, Option<VmQueriedItem<'_>>)> {
        let (range, item) = self.0.query()?;
        Ok((range, item.map(VmQueriedItem::from)))
    }

    /// Moves the cursor forward to the next mapped virtual address.
    ///
    /// If there is a mapped virtual address following the current address
    /// within the next `len` bytes, it will return that mapped address. In
    /// this case, the cursor will stop at the mapped address.
    ///
    /// Otherwise, it will return `None`, and the cursor may stop at any
    /// address after `len` bytes.
    ///
    /// # Panics
    ///
    /// Panics if the length is longer than the remaining range of the cursor.
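    ///
    /// # Examples
    ///
    /// A sketch, assuming at least `len` bytes remain in the cursor's range:
    ///
    /// ```ignore
    /// if let Some(va) = cursor.find_next(len) {
    ///     // The cursor has stopped exactly at the mapped address it found.
    ///     assert_eq!(cursor.virt_addr(), va);
    /// }
    /// ```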
    pub fn find_next(&mut self, len: usize) -> Option<Vaddr> {
        self.0.find_next(len)
    }

    /// Jumps to the virtual address.
    ///
    /// If the target address is out of the range, this method will return `Err`.
    ///
    /// # Panics
    ///
    /// This method panics if the address has bad alignment.
    pub fn jump(&mut self, va: Vaddr) -> Result<()> {
        self.0.jump(va)?;
        Ok(())
    }

    /// Gets the virtual address of the current slot.
    pub fn virt_addr(&self) -> Vaddr {
        self.0.virt_addr()
    }
}

/// The cursor for modifying the mappings in VM space.
///
/// It exclusively owns a sub-tree of the page table, preventing others from
/// reading or modifying the same sub-tree.
pub struct CursorMut<'a> {
    pt_cursor: page_table::CursorMut<'a, UserPtConfig>,
    // We have a read lock so the CPU set in the flusher is always a superset
    // of the actually activated CPUs.
    flusher: TlbFlusher<'a, DisabledPreemptGuard>,
    // Reference to the `VmSpace`.
    vmspace: &'a VmSpace,
}

impl<'a> CursorMut<'a> {
    /// Queries the mapping at the current virtual address.
    ///
    /// This is the same as [`Cursor::query`].
    ///
    /// If the cursor is pointing to a valid virtual address that is locked,
    /// it will return the virtual address range and the mapped item.
    pub fn query(&mut self) -> Result<(Range<Vaddr>, Option<VmQueriedItem<'_>>)> {
        let (range, item) = self.pt_cursor.query()?;
        Ok((range, item.map(VmQueriedItem::from)))
    }

    /// Moves the cursor forward to the next mapped virtual address.
    ///
    /// This is the same as [`Cursor::find_next`].
    pub fn find_next(&mut self, len: usize) -> Option<Vaddr> {
        self.pt_cursor.find_next(len)
    }

    /// Jumps to the virtual address.
    ///
    /// This is the same as [`Cursor::jump`].
    ///
    /// # Panics
    ///
    /// This method panics if the address has bad alignment.
    pub fn jump(&mut self, va: Vaddr) -> Result<()> {
        self.pt_cursor.jump(va)?;
        Ok(())
    }

    /// Gets the virtual address of the current slot.
    pub fn virt_addr(&self) -> Vaddr {
        self.pt_cursor.virt_addr()
    }

    /// Gets the dedicated TLB flusher for this cursor.
    pub fn flusher(&mut self) -> &mut TlbFlusher<'a, DisabledPreemptGuard> {
        &mut self.flusher
    }

    /// Maps a frame into the current slot.
    ///
    /// This method will bring the cursor to the next slot after the modification.
    ///
    /// # Panics
    ///
    /// Panics if the current virtual address is already mapped.
    pub fn map(&mut self, frame: UFrame, prop: PageProperty) {
        let item = VmItem::new_tracked(frame, prop);

        // SAFETY: It is safe to map untyped memory into the userspace.
        unsafe { self.pt_cursor.map(item) };
    }

    /// Maps a range of [`IoMem`] into the current slot.
    ///
    /// The memory region to be mapped is the [`IoMem`] range starting at
    /// `offset` and extending to `offset + len`, or to the end of [`IoMem`],
    /// whichever comes first. This method will bring the cursor to the next
    /// slot after the modification.
    ///
    /// # Limitations
    ///
    /// Once an instance of `IoMem` is mapped to a `VmSpace`, the `IoMem`
    /// instance will only be dropped when the `VmSpace` is dropped, not when
    /// all the mappings backed by the `IoMem` are destroyed with the `unmap`
    /// method.
    ///
    /// # Panics
    ///
    /// Panics if
    ///  - `len` or `offset` is not aligned to the page size;
    ///  - the current virtual address is already mapped.
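    ///
    /// # Examples
    ///
    /// A minimal sketch (not compiled here; `mmio_region` stands in for an
    /// [`IoMem`] obtained from the kernel's device-discovery code, and
    /// `user_mmio_prop` for a suitable uncacheable [`PageProperty`]):
    ///
    /// ```ignore
    /// // Map the first page of the MMIO region at the cursor's current address.
    /// cursor.map_iomem(mmio_region, user_mmio_prop(), PAGE_SIZE, 0);
    /// ```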
    pub fn map_iomem(&mut self, io_mem: IoMem, prop: PageProperty, len: usize, offset: usize) {
        assert_eq!(len % PAGE_SIZE, 0);
        assert_eq!(offset % PAGE_SIZE, 0);

        if offset >= io_mem.size() {
            return;
        }

        let paddr_begin = io_mem.paddr() + offset;
        let paddr_end = if io_mem.size() - offset < len {
            io_mem.paddr() + io_mem.size()
        } else {
            io_mem.paddr() + len + offset
        };

        for current_paddr in (paddr_begin..paddr_end).step_by(PAGE_SIZE) {
            // SAFETY: It is safe to map I/O memory into the userspace.
            unsafe {
                self.pt_cursor
                    .map(VmItem::new_untracked_io(current_paddr, prop))
            };
        }

        // If the `iomems` list in `VmSpace` does not contain the current I/O
        // memory, push it to maintain the correct reference count.
        let mut iomems = self.vmspace.iomems.lock();
        if !iomems
            .iter()
            .any(|iomem| iomem.paddr() == io_mem.paddr() && iomem.size() == io_mem.size())
        {
            iomems.push(io_mem);
        }
    }

    /// Finds an [`IoMem`] that was previously mapped to by [`Self::map_iomem`] and contains the
    /// physical address.
    ///
    /// This method can recover the originally mapped `IoMem` from the physical address returned by
    /// [`Self::query`]. If the query returns a [`VmQueriedItem::MappedIoMem`], this method is
    /// guaranteed to succeed with the specific physical address. However, if the corresponding
    /// mapping is subsequently unmapped, it is unspecified whether this method will still succeed
    /// or not.
    ///
    /// On success, this method returns the `IoMem` and the offset from the `IoMem` start to the
    /// given physical address. Otherwise, this method returns `None`.
    pub fn find_iomem_by_paddr(&self, paddr: Paddr) -> Option<(IoMem, usize)> {
        self.vmspace.find_iomem_by_paddr(paddr)
    }

    /// Clears the mapping starting from the current slot,
    /// and returns the number of unmapped pages.
    ///
    /// This method will bring the cursor forward by `len` bytes in the virtual
    /// address space after the modification.
    ///
    /// Already-absent mappings encountered by the cursor will be skipped. It
    /// is valid to unmap a range that is not mapped.
    ///
    /// This method must issue and dispatch a TLB flush after the operation;
    /// otherwise, memory safety would be compromised. To amortize the TLB
    /// flush overhead, call this function sparingly: unmapping one large `len`
    /// is wiser than splitting the operation into multiple small ones.
    ///
    /// # Panics
    ///
    /// Panics if:
    ///  - the length is longer than the remaining range of the cursor;
    ///  - the length is not page-aligned.
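    ///
    /// # Examples
    ///
    /// A sketch, assuming the cursor was created over the range `va`:
    ///
    /// ```ignore
    /// // Unmap the whole range in one call so that a single TLB flush
    /// // is issued and dispatched for the entire operation.
    /// let num_unmapped = cursor.unmap(va.end - va.start);
    /// ```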
    pub fn unmap(&mut self, len: usize) -> usize {
        let end_va = self.virt_addr() + len;
        let mut num_unmapped: usize = 0;
        loop {
            // SAFETY: It is safe to un-map memory in the userspace. And the
            // un-mapped items are dropped after TLB flushes.
            let Some(frag) = (unsafe { self.pt_cursor.take_next(end_va - self.virt_addr()) })
            else {
                break; // No more mappings in the range.
            };

            match frag {
                PageTableFrag::Mapped { va, item, .. } => {
                    // SAFETY: If the item is not a scalar (e.g., a frame
                    // pointer), we will drop it after the RCU grace period
                    // (see `issue_tlb_flush_with`).
                    let (item, panic_guard) = unsafe { RcuDrop::into_inner(item) };

                    match item {
                        VmItem {
                            mapped_item: MappedItem::TrackedFrame(old_frame),
                            ..
                        } => {
                            num_unmapped += 1;

                            let rcu_frame = RcuDrop::new(old_frame);
                            panic_guard.forget();
                            let rcu_frame = Frame::rcu_from_unsized(rcu_frame);
                            self.flusher
                                .issue_tlb_flush_with(TlbFlushOp::for_single(va), rcu_frame);
                        }
                        VmItem {
                            mapped_item: MappedItem::UntrackedIoMem { .. },
                            ..
                        } => {
                            panic_guard.forget();

                            // Flush the TLB entry for the current address, but
                            // in the current design, we cannot drop the
                            // corresponding `IoMem`. This is because we manage
                            // the range of I/O as a whole, but the frames
                            // handled here might be one segment of it.
                            self.flusher.issue_tlb_flush(TlbFlushOp::for_single(va));
                        }
                    }
                }
                PageTableFrag::StrayPageTable {
                    pt,
                    va,
                    len,
                    num_frames,
                } => {
                    num_unmapped += num_frames;

                    self.flusher.issue_tlb_flush_with(
                        TlbFlushOp::for_range(va..va + len),
                        Frame::rcu_from_unsized(pt),
                    );
                }
            }
        }

        self.flusher.dispatch_tlb_flush();

        num_unmapped
    }

    /// Applies the operation to the next slot of mapping within the range.
    ///
    /// The range to be searched is from the current virtual address through
    /// the provided length.
    ///
    /// The function stops and yields the actually protected range as soon as
    /// it has protected a page, no matter whether the following pages also
    /// need to be protected.
    ///
    /// It also moves the cursor forward to the next page after the protected
    /// one. If no mapped pages exist in the following range, the cursor will
    /// stop at the end of the range and return [`None`].
    ///
    /// Note that it will **NOT** flush the TLB after the operation. Please
    /// decide for yourself when and how to flush the TLB using
    /// [`Self::flusher`], as in the sketch below.
    ///
    /// # Panics
    ///
    /// Panics if the length is longer than the remaining range of the cursor.
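    ///
    /// # Examples
    ///
    /// Write-protecting every mapped page in the cursor's range might look
    /// like this (a sketch, assuming the cursor was created over `va`):
    ///
    /// ```ignore
    /// while let Some(range) = cursor.protect_next(va.end - cursor.virt_addr(), |flags, _| {
    ///     *flags -= PageFlags::W; // Drop the writable bit.
    /// }) {
    ///     cursor.flusher().issue_tlb_flush(TlbFlushOp::for_range(range));
    /// }
    /// cursor.flusher().dispatch_tlb_flush();
    /// ```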
    pub fn protect_next(
        &mut self,
        len: usize,
        mut op: impl FnMut(&mut PageFlags, &mut CachePolicy),
    ) -> Option<Range<Vaddr>> {
        // SAFETY: It is safe to set `PageFlags` and `CachePolicy` of memory
        // in the userspace.
        unsafe {
            self.pt_cursor.protect_next(len, &mut |prop| {
                op(&mut prop.flags, &mut prop.cache);
            })
        }
    }
}

cpu_local_cell! {
    /// The `Arc` pointer to the activated VM space on this CPU. If the pointer
    /// is NULL, it means that the activated page table is merely the kernel
    /// page table.
    // TODO: If we are enabling ASID, we need to maintain the TLB state of each
    // CPU, rather than merely the activated `VmSpace`. When ASID is enabled,
    // the non-active `VmSpace`s can still have their TLB entries in the CPU!
    static ACTIVATED_VM_SPACE: *const VmSpace = core::ptr::null();
}

#[cfg(ktest)]
pub(super) fn get_activated_vm_space() -> *const VmSpace {
    ACTIVATED_VM_SPACE.load()
}

/// The result of a query over the VM space.
pub enum VmQueriedItem<'a> {
    /// The current slot is mapped, and the frame within is allocated from
    /// physical memory.
    MappedRam {
        /// The mapped frame.
        frame: FrameRef<'a, dyn AnyUFrameMeta>,
        /// The property of the slot.
        prop: PageProperty,
    },
    /// The current slot is mapped, and the frame within is allocated from
    /// MMIO memory.
    MappedIoMem {
        /// The physical address of the corresponding I/O memory.
        paddr: Paddr,
        /// The property of the slot.
        prop: PageProperty,
    },
}

impl VmQueriedItem<'_> {
    /// Returns the page property of the mapped item.
    pub fn prop(&self) -> &PageProperty {
        match self {
            Self::MappedRam { prop, .. } => prop,
            Self::MappedIoMem { prop, .. } => prop,
        }
    }
}

/// Internal representation of a VM item.
///
/// This is kept private to ensure memory safety. The public interface
/// should use `VmQueriedItem` for querying mapping information.
#[derive(Clone, Debug, PartialEq)]
pub(crate) struct VmItem {
    prop: PageProperty,
    mapped_item: MappedItem,
}

/// A reference to a VM item.
#[derive(Debug)]
pub(crate) struct VmItemRef<'a> {
    prop: PageProperty,
    mapped_item: MappedItemRef<'a>,
}

#[derive(Clone, Debug, PartialEq)]
enum MappedItem {
    TrackedFrame(UFrame),
    UntrackedIoMem { paddr: Paddr, level: PagingLevel },
}

#[derive(Debug)]
enum MappedItemRef<'a> {
    TrackedFrame(FrameRef<'a, dyn AnyUFrameMeta>),
    UntrackedIoMem { paddr: Paddr, level: PagingLevel },
}

impl VmItem {
    /// Creates a new `VmItem` that maps a tracked frame.
    pub(super) fn new_tracked(frame: UFrame, prop: PageProperty) -> Self {
        Self {
            prop,
            mapped_item: MappedItem::TrackedFrame(frame),
        }
    }

    /// Creates a new `VmItem` that maps untracked I/O memory.
    fn new_untracked_io(paddr: Paddr, prop: PageProperty) -> Self {
        Self {
            prop,
            mapped_item: MappedItem::UntrackedIoMem { paddr, level: 1 },
        }
    }
}

impl<'a> From<VmItemRef<'a>> for VmQueriedItem<'a> {
    fn from(item: VmItemRef<'a>) -> Self {
        match item.mapped_item {
            MappedItemRef::TrackedFrame(frame) => VmQueriedItem::MappedRam {
                frame,
                prop: item.prop,
            },
            MappedItemRef::UntrackedIoMem { paddr, level } => {
                debug_assert_eq!(level, 1);
                VmQueriedItem::MappedIoMem {
                    paddr,
                    prop: item.prop,
                }
            }
        }
    }
}

#[derive(Clone, Debug)]
pub(crate) struct UserPtConfig {}

// SAFETY: `item_raw_info`, `item_into_raw`, `item_from_raw`, and
// `item_ref_from_raw` are correctly implemented with respect to the `Item` and
// `ItemRef` types.
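// The item kind is encoded in the `AVAIL1` software-available PTE bit:
// `AVAIL1` is set for untracked I/O memory and clear for tracked frames. This
// lets `item_from_raw` and `item_ref_from_raw` below reconstruct the right
// `VmItem` variant from a raw entry without any out-of-band bookkeeping.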
unsafe impl PageTableConfig for UserPtConfig {
    const TOP_LEVEL_INDEX_RANGE: Range<usize> = 0..256;

    type E = PageTableEntry;
    type C = PagingConsts;

    type Item = VmItem;
    type ItemRef<'a> = VmItemRef<'a>;

    fn item_raw_info(item: &Self::Item) -> (Paddr, PagingLevel, PageProperty) {
        match &item.mapped_item {
            MappedItem::TrackedFrame(frame) => {
                let mut prop = item.prop;
                prop.priv_flags -= PrivilegedPageFlags::AVAIL1; // Clear AVAIL1 for tracked frames.
                let level = frame.map_level();
                let paddr = frame.paddr();
                (paddr, level, prop)
            }
            MappedItem::UntrackedIoMem { paddr, level } => {
                let mut prop = item.prop;
                prop.priv_flags |= PrivilegedPageFlags::AVAIL1; // Set AVAIL1 for I/O memory.
                (*paddr, *level, prop)
            }
        }
    }

    unsafe fn item_from_raw(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self::Item {
        debug_assert_eq!(level, 1);
        if prop.priv_flags.contains(PrivilegedPageFlags::AVAIL1) {
            // `AVAIL1` is set, so this is I/O memory.
            VmItem::new_untracked_io(paddr, prop)
        } else {
            // `AVAIL1` is clear, so this is tracked memory.
            // SAFETY: The caller ensures safety.
            let frame = unsafe { Frame::<dyn AnyUFrameMeta>::from_raw(paddr) };
            VmItem::new_tracked(frame, prop)
        }
    }

    unsafe fn item_ref_from_raw<'a>(
        paddr: Paddr,
        level: PagingLevel,
        prop: PageProperty,
    ) -> Self::ItemRef<'a> {
        debug_assert_eq!(level, 1);
        if prop.priv_flags.contains(PrivilegedPageFlags::AVAIL1) {
            // `AVAIL1` is set, so this is I/O memory.
            VmItemRef {
                prop,
                mapped_item: MappedItemRef::UntrackedIoMem { paddr, level },
            }
        } else {
            // `AVAIL1` is clear, so this is tracked memory.
            // SAFETY: The caller ensures that the frame outlives `'a` and that
            // the type matches the frame.
            let frame_ref = unsafe { FrameRef::<dyn AnyUFrameMeta>::borrow_paddr(paddr) };
            VmItemRef {
                prop,
                mapped_item: MappedItemRef::TrackedFrame(frame_ref),
            }
        }
    }
}