ostd/mm/vm_space.rs

// SPDX-License-Identifier: MPL-2.0

//! Virtual memory space management.
//!
//! The [`VmSpace`] struct is provided to manage the virtual memory space of a
//! user. Cursors are used to traverse and modify the virtual memory space
//! concurrently. The VM space cursor [`self::Cursor`] is just a wrapper over
//! the page table cursor, providing efficient, powerful concurrent accesses
//! to the page table.

use core::{ops::Range, sync::atomic::Ordering};

use super::{AnyUFrameMeta, PagingLevel, page_table::PageTableConfig};
use crate::{
    Error,
    arch::mm::{PageTableEntry, PagingConsts, current_page_table_paddr},
    cpu::{AtomicCpuSet, CpuSet, PinCurrentCpu},
    cpu_local_cell,
    io::IoMem,
    mm::{
        Frame, MAX_USERSPACE_VADDR, PAGE_SIZE, PageProperty, PrivilegedPageFlags, UFrame, VmReader,
        VmWriter,
        io::Fallible,
        kspace::KERNEL_PAGE_TABLE,
        page_prop::{CachePolicy, PageFlags},
        page_table::{self, PageTable, PageTableFrag},
        tlb::{TlbFlushOp, TlbFlusher},
    },
    prelude::*,
    sync::SpinLock,
    task::{DisabledPreemptGuard, atomic_mode::AsAtomicModeGuard, disable_preempt},
};

/// A virtual address space for user-mode tasks, enabling safe manipulation of user-space memory.
///
/// The `VmSpace` type provides memory isolation guarantees between user-space and
/// kernel-space. For example, given an arbitrary user-space pointer, one can read and
/// write the memory location referred to by the user-space pointer without the risk of
/// breaking the memory safety of the kernel space.
///
/// # Task Association Semantics
///
/// As far as OSTD is concerned, a `VmSpace` is not necessarily associated with a task. Once a
/// `VmSpace` is activated (see [`VmSpace::activate`]), it remains activated until another
/// `VmSpace` is activated, **possibly by another task running on the same CPU**.
///
/// This means that it's up to the kernel to ensure that a task's `VmSpace` is always activated
/// while the task is running. This can be done by using the injected post-schedule handler
/// (see [`inject_post_schedule_handler`]) to always activate the correct `VmSpace` after each
/// context switch.
///
/// If the kernel otherwise decides not to ensure that the running task's `VmSpace` is always
/// activated, the kernel must deal with race conditions when calling methods that require the
/// `VmSpace` to be activated, e.g., [`UserMode::execute`], [`VmSpace::reader`],
/// [`VmSpace::writer`]. Otherwise, the behavior is unspecified, though it's guaranteed _not_ to
/// compromise the kernel's memory safety.
///
/// # Memory Backing
///
/// A newly-created `VmSpace` is not backed by any physical memory pages. To
/// provide memory pages for a `VmSpace`, one can allocate and map physical
/// memory ([`UFrame`]s) to the `VmSpace` using the cursor.
///
/// A `VmSpace` can also attach a page fault handler, which will be invoked to
/// handle page faults generated from user space.
///
/// [`inject_post_schedule_handler`]: crate::task::inject_post_schedule_handler
/// [`UserMode::execute`]: crate::user::UserMode::execute
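///
/// # Example
///
/// A minimal sketch (marked `ignore` since it needs a running kernel; it
/// assumes the `ostd` prelude is in scope and that a user page has already
/// been mapped at the hypothetical address `0x1000`):
///
/// ```rust,ignore
/// let vm_space = Arc::new(VmSpace::new());
/// // Activate the space on the current CPU so that `reader`/`writer` work.
/// vm_space.activate();
/// // Read 16 bytes starting at the user address 0x1000.
/// let _reader = vm_space.reader(0x1000, 16).unwrap();
/// ```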
#[derive(Debug)]
pub struct VmSpace {
    pt: PageTable<UserPtConfig>,
    cpus: AtomicCpuSet,
    iomems: SpinLock<Vec<IoMem>>,
}

impl VmSpace {
    /// Creates a new VM address space.
    pub fn new() -> Self {
        Self {
            pt: KERNEL_PAGE_TABLE.get().unwrap().create_user_page_table(),
            cpus: AtomicCpuSet::new(CpuSet::new_empty()),
            iomems: SpinLock::new(Vec::new()),
        }
    }

    /// Gets an immutable cursor in the virtual address range.
    ///
    /// The cursor behaves like a lock guard, exclusively owning a sub-tree of
    /// the page table, preventing others from creating a cursor in it. So be
    /// sure to drop the cursor as soon as possible.
    ///
    /// The creation of the cursor may block if another cursor having an
    /// overlapping range is alive.
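    ///
    /// # Example
    ///
    /// A minimal sketch (marked `ignore` since it needs a running kernel; it
    /// assumes a `vm_space: Arc<VmSpace>` and that the preemption-disabled
    /// guard counts as the required atomic-mode guard):
    ///
    /// ```rust,ignore
    /// let guard = disable_preempt();
    /// let cursor = vm_space.cursor(&guard, &(0x1000..0x3000)).unwrap();
    /// for (va_range, item) in cursor {
    ///     if let Some(item) = item {
    ///         // `va_range` is mapped; inspect `item.prop()` etc.
    ///     }
    /// }
    /// ```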
    pub fn cursor<'a, G: AsAtomicModeGuard>(
        &'a self,
        guard: &'a G,
        va: &Range<Vaddr>,
    ) -> Result<Cursor<'a>> {
        Ok(Cursor(self.pt.cursor(guard, va)?))
    }

    /// Gets a mutable cursor in the virtual address range.
    ///
    /// The same as [`Self::cursor`], the cursor behaves like a lock guard,
    /// exclusively owning a sub-tree of the page table, preventing others
    /// from creating a cursor in it. So be sure to drop the cursor as soon as
    /// possible.
    ///
    /// The creation of the cursor may block if another cursor having an
    /// overlapping range is alive. The modification to the mapping by the
    /// cursor may also block or be overridden by the modification of another
    /// cursor.
    pub fn cursor_mut<'a, G: AsAtomicModeGuard>(
        &'a self,
        guard: &'a G,
        va: &Range<Vaddr>,
    ) -> Result<CursorMut<'a>> {
        Ok(CursorMut {
            pt_cursor: self.pt.cursor_mut(guard, va)?,
            flusher: TlbFlusher::new(&self.cpus, disable_preempt()),
            vmspace: self,
        })
    }

    /// Activates the page table on the current CPU.
    pub fn activate(self: &Arc<Self>) {
        let preempt_guard = disable_preempt();
        let cpu = preempt_guard.current_cpu();

        let last_ptr = ACTIVATED_VM_SPACE.load();

        if last_ptr == Arc::as_ptr(self) {
            return;
        }

        // Record ourselves in the CPU set and the activated VM space pointer.
        // `Acquire` to ensure the modification to the PT is visible by this CPU.
        self.cpus.add(cpu, Ordering::Acquire);

        let self_ptr = Arc::into_raw(Arc::clone(self)) as *mut VmSpace;
        ACTIVATED_VM_SPACE.store(self_ptr);

        if !last_ptr.is_null() {
            // SAFETY: The pointer is cast from an `Arc` when it's activated
            // the last time, so it can be restored and only restored once.
            let last = unsafe { Arc::from_raw(last_ptr) };
            last.cpus.remove(cpu, Ordering::Relaxed);
        }

        self.pt.activate();
    }

    /// Creates a reader to read data from the user space of the current task.
    ///
    /// Returns `Err` if this `VmSpace` doesn't belong to the user space of the current task
    /// or the `vaddr` and `len` do not represent a user space memory range.
    ///
    /// Users must ensure that no other page table is activated in the current task during the
    /// lifetime of the created `VmReader`. This guarantees that the `VmReader` can operate correctly.
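    ///
    /// # Example
    ///
    /// A minimal sketch of the bounds check (marked `ignore` since it needs a
    /// running kernel; it assumes `vm_space` is the `VmSpace` activated by the
    /// current task):
    ///
    /// ```rust,ignore
    /// // A range that goes beyond `MAX_USERSPACE_VADDR` is rejected.
    /// assert!(vm_space.reader(MAX_USERSPACE_VADDR, 1).is_err());
    /// // A range fully inside the user address space is accepted.
    /// assert!(vm_space.reader(0x1000, PAGE_SIZE).is_ok());
    /// ```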
    pub fn reader(&self, vaddr: Vaddr, len: usize) -> Result<VmReader<'_, Fallible>> {
        if current_page_table_paddr() != self.pt.root_paddr() {
            return Err(Error::AccessDenied);
        }

        if vaddr.saturating_add(len) > MAX_USERSPACE_VADDR {
            return Err(Error::AccessDenied);
        }

        // SAFETY: The memory range is in user space, as checked above.
        Ok(unsafe { VmReader::<Fallible>::from_user_space(vaddr as *const u8, len) })
    }

    /// Creates a writer to write data into the user space.
    ///
    /// Returns `Err` if this `VmSpace` doesn't belong to the user space of the current task
    /// or the `vaddr` and `len` do not represent a user space memory range.
    ///
    /// Users must ensure that no other page table is activated in the current task during the
    /// lifetime of the created `VmWriter`. This guarantees that the `VmWriter` can operate correctly.
    pub fn writer(&self, vaddr: Vaddr, len: usize) -> Result<VmWriter<'_, Fallible>> {
        if current_page_table_paddr() != self.pt.root_paddr() {
            return Err(Error::AccessDenied);
        }

        if vaddr.saturating_add(len) > MAX_USERSPACE_VADDR {
            return Err(Error::AccessDenied);
        }

        // `VmWriter` is neither `Sync` nor `Send`, so it will not live longer than the current
        // task. This ensures that the correct page table is activated during the usage period of
        // the `VmWriter`.
        //
        // SAFETY: The memory range is in user space, as checked above.
        Ok(unsafe { VmWriter::<Fallible>::from_user_space(vaddr as *mut u8, len) })
    }

    /// Creates a reader/writer pair to read data from and write data into the user space.
    ///
    /// Returns `Err` if this `VmSpace` doesn't belong to the user space of the current task
    /// or the `vaddr` and `len` do not represent a user space memory range.
    ///
    /// Users must ensure that no other page table is activated in the current task during the
    /// lifetime of the created `VmReader` and `VmWriter`. This guarantees that the `VmReader`
    /// and the `VmWriter` can operate correctly.
    ///
    /// This method is semantically equivalent to calling [`Self::reader`] and [`Self::writer`]
    /// separately, but it avoids double checking the validity of the memory region.
    pub fn reader_writer(
        &self,
        vaddr: Vaddr,
        len: usize,
    ) -> Result<(VmReader<'_, Fallible>, VmWriter<'_, Fallible>)> {
        if current_page_table_paddr() != self.pt.root_paddr() {
            return Err(Error::AccessDenied);
        }

        if vaddr.saturating_add(len) > MAX_USERSPACE_VADDR {
            return Err(Error::AccessDenied);
        }

        // SAFETY: The memory range is in user space, as checked above.
        let reader = unsafe { VmReader::<Fallible>::from_user_space(vaddr as *const u8, len) };

        // `VmWriter` is neither `Sync` nor `Send`, so it will not live longer than the current
        // task. This ensures that the correct page table is activated during the usage period of
        // the `VmWriter`.
        //
        // SAFETY: The memory range is in user space, as checked above.
        let writer = unsafe { VmWriter::<Fallible>::from_user_space(vaddr as *mut u8, len) };

        Ok((reader, writer))
    }
}

impl Default for VmSpace {
    fn default() -> Self {
        Self::new()
    }
}

impl VmSpace {
    /// Finds the [`IoMem`] that contains the given physical address.
    ///
    /// It is a private method for internal use only. Please refer to
    /// [`CursorMut::find_iomem_by_paddr`] for more details.
    fn find_iomem_by_paddr(&self, paddr: Paddr) -> Option<(IoMem, usize)> {
        let iomems = self.iomems.lock();
        for iomem in iomems.iter() {
            let start = iomem.paddr();
            let end = start + iomem.size();
            if paddr >= start && paddr < end {
                let offset = paddr - start;
                return Some((iomem.clone(), offset));
            }
        }
        None
    }
}

/// The cursor for querying over the VM space without modifying it.
///
/// It exclusively owns a sub-tree of the page table, preventing others from
/// reading or modifying the same sub-tree. Two read-only cursors cannot be
/// created from the same virtual address range either.
pub struct Cursor<'a>(page_table::Cursor<'a, UserPtConfig>);

impl Iterator for Cursor<'_> {
    type Item = (Range<Vaddr>, Option<VmQueriedItem>);

    fn next(&mut self) -> Option<Self::Item> {
        self.0
            .next()
            .map(|(range, item)| (range, item.map(VmQueriedItem::from)))
    }
}

impl Cursor<'_> {
    /// Queries the mapping at the current virtual address.
    ///
    /// If the cursor is pointing to a valid virtual address that is locked,
    /// it will return the virtual address range and the mapped item.
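    ///
    /// # Example
    ///
    /// A minimal sketch (marked `ignore` since it needs a running kernel; it
    /// assumes a `cursor` created with [`VmSpace::cursor`]):
    ///
    /// ```rust,ignore
    /// let (va_range, item) = cursor.query().unwrap();
    /// match item {
    ///     Some(VmQueriedItem::MappedRam { .. }) => { /* backed by RAM */ }
    ///     Some(VmQueriedItem::MappedIoMem { .. }) => { /* backed by I/O memory */ }
    ///     None => { /* `va_range` is not mapped */ }
    /// }
    /// ```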
    pub fn query(&mut self) -> Result<(Range<Vaddr>, Option<VmQueriedItem>)> {
        let (range, item) = self.0.query()?;
        Ok((range, item.map(VmQueriedItem::from)))
    }

    /// Moves the cursor forward to the next mapped virtual address.
    ///
    /// If there is a mapped virtual address following the current address
    /// within the next `len` bytes, it will return that mapped address. In
    /// this case, the cursor will stop at the mapped address.
    ///
    /// Otherwise, it will return `None`, and the cursor may stop at any
    /// address after `len` bytes.
    ///
    /// # Panics
    ///
    /// Panics if the length is longer than the remaining range of the cursor.
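    ///
    /// # Example
    ///
    /// A minimal sketch (marked `ignore` since it needs a running kernel;
    /// assume the cursor covers `0x1000..0x3000` and only the page at
    /// `0x2000` is mapped):
    ///
    /// ```rust,ignore
    /// assert_eq!(cursor.find_next(0x2000), Some(0x2000));
    /// assert_eq!(cursor.virt_addr(), 0x2000);
    /// ```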
    pub fn find_next(&mut self, len: usize) -> Option<Vaddr> {
        self.0.find_next(len)
    }

    /// Jumps to the virtual address.
    pub fn jump(&mut self, va: Vaddr) -> Result<()> {
        self.0.jump(va)?;
        Ok(())
    }

    /// Gets the virtual address of the current slot.
    pub fn virt_addr(&self) -> Vaddr {
        self.0.virt_addr()
    }
}

/// The cursor for modifying the mappings in VM space.
///
/// It exclusively owns a sub-tree of the page table, preventing others from
/// reading or modifying the same sub-tree.
pub struct CursorMut<'a> {
    pt_cursor: page_table::CursorMut<'a, UserPtConfig>,
    // We have a read lock so the CPU set in the flusher is always a superset
    // of actual activated CPUs.
    flusher: TlbFlusher<'a, DisabledPreemptGuard>,
    // Reference to the `VmSpace`.
    vmspace: &'a VmSpace,
}

impl<'a> CursorMut<'a> {
    /// Queries the mapping at the current virtual address.
    ///
    /// This is the same as [`Cursor::query`].
    ///
    /// If the cursor is pointing to a valid virtual address that is locked,
    /// it will return the virtual address range and the mapped item.
    pub fn query(&mut self) -> Result<(Range<Vaddr>, Option<VmQueriedItem>)> {
        let (range, item) = self.pt_cursor.query()?;
        Ok((range, item.map(VmQueriedItem::from)))
    }

    /// Moves the cursor forward to the next mapped virtual address.
    ///
    /// This is the same as [`Cursor::find_next`].
    pub fn find_next(&mut self, len: usize) -> Option<Vaddr> {
        self.pt_cursor.find_next(len)
    }

    /// Jumps to the virtual address.
    ///
    /// This is the same as [`Cursor::jump`].
    pub fn jump(&mut self, va: Vaddr) -> Result<()> {
        self.pt_cursor.jump(va)?;
        Ok(())
    }

    /// Gets the virtual address of the current slot.
    pub fn virt_addr(&self) -> Vaddr {
        self.pt_cursor.virt_addr()
    }

    /// Gets the dedicated TLB flusher for this cursor.
    pub fn flusher(&mut self) -> &mut TlbFlusher<'a, DisabledPreemptGuard> {
        &mut self.flusher
    }

    /// Maps a frame into the current slot.
    ///
    /// This method will bring the cursor to the next slot after the modification.
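    ///
    /// # Example
    ///
    /// A minimal sketch (marked `ignore` since it needs a running kernel; it
    /// assumes an already-allocated `frame: UFrame` and some
    /// `prop: PageProperty` suitable for a user mapping):
    ///
    /// ```rust,ignore
    /// let guard = disable_preempt();
    /// let mut cursor = vm_space.cursor_mut(&guard, &(0x1000..0x2000)).unwrap();
    /// cursor.map(frame, prop);
    /// // The cursor now points at the slot following the new mapping.
    /// ```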
    pub fn map(&mut self, frame: UFrame, prop: PageProperty) {
        let start_va = self.virt_addr();
        let item = VmItem::new_tracked(frame, prop);

        // SAFETY: It is safe to map untyped memory into the userspace.
        let Err(frag) = (unsafe { self.pt_cursor.map(item) }) else {
            return; // No mapping exists at the current address.
        };

        self.handle_remapped_frag(frag, start_va);
    }

    /// Maps a range of [`IoMem`] into the current slot.
    ///
    /// The memory region to be mapped is the [`IoMem`] range starting at
    /// `offset` and extending to `offset + len`, or to the end of [`IoMem`],
    /// whichever comes first. This method will bring the cursor to the next
    /// slot after the modification.
    ///
    /// # Limitations
    ///
    /// Once an instance of `IoMem` is mapped to a `VmSpace`, the `IoMem`
    /// instance will only be dropped when the `VmSpace` is dropped, not when
    /// all the mappings backed by the `IoMem` are destroyed with the `unmap`
    /// method.
    ///
    /// # Panics
    ///
    /// Panics if `len` or `offset` is not aligned to the page size.
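    ///
    /// # Example
    ///
    /// A minimal sketch (marked `ignore` since it needs a running kernel; it
    /// assumes an `io_mem: IoMem` handle obtained elsewhere and some
    /// `prop: PageProperty` suitable for a user mapping):
    ///
    /// ```rust,ignore
    /// let guard = disable_preempt();
    /// let mut cursor = vm_space.cursor_mut(&guard, &(0x4000..0x8000)).unwrap();
    /// // Map the first page of the I/O region at the current virtual address.
    /// cursor.map_iomem(io_mem, prop, PAGE_SIZE, 0);
    /// ```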
    pub fn map_iomem(&mut self, io_mem: IoMem, prop: PageProperty, len: usize, offset: usize) {
        assert_eq!(len % PAGE_SIZE, 0);
        assert_eq!(offset % PAGE_SIZE, 0);

        if offset >= io_mem.size() {
            return;
        }

        let paddr_begin = io_mem.paddr() + offset;
        let paddr_end = if io_mem.size() - offset < len {
            io_mem.paddr() + io_mem.size()
        } else {
            io_mem.paddr() + len + offset
        };

        for current_paddr in (paddr_begin..paddr_end).step_by(PAGE_SIZE) {
            // Save the current virtual address before mapping, since `map` will
            // advance the cursor.
            let current_va = self.virt_addr();

            // SAFETY: It is safe to map I/O memory into the userspace.
            let map_result = unsafe {
                self.pt_cursor
                    .map(VmItem::new_untracked_io(current_paddr, prop))
            };

            let Err(frag) = map_result else {
                // No mapping exists at the current address.
                continue;
            };

            self.handle_remapped_frag(frag, current_va);
        }

        // If the `iomems` list in `VmSpace` does not contain the current I/O
        // memory, push it to maintain the correct reference count.
        let mut iomems = self.vmspace.iomems.lock();
        if !iomems
            .iter()
            .any(|iomem| iomem.paddr() == io_mem.paddr() && iomem.size() == io_mem.size())
        {
            iomems.push(io_mem);
        }
    }

    /// Finds an [`IoMem`] that was previously mapped by [`Self::map_iomem`] and contains the
    /// physical address.
    ///
    /// This method can recover the originally mapped `IoMem` from the physical address returned by
    /// [`Self::query`]. If the query returns a [`VmQueriedItem::MappedIoMem`], this method is
    /// guaranteed to succeed with the specific physical address. However, if the corresponding
    /// mapping is subsequently unmapped, it is unspecified whether this method will still succeed
    /// or not.
    ///
    /// On success, this method returns the `IoMem` and the offset from the `IoMem` start to the
    /// given physical address. Otherwise, this method returns `None`.
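    ///
    /// # Example
    ///
    /// A minimal sketch (marked `ignore` since it needs a running kernel; it
    /// assumes the cursor points at a slot mapped with [`Self::map_iomem`]):
    ///
    /// ```rust,ignore
    /// if let (_, Some(VmQueriedItem::MappedIoMem { paddr, .. })) = cursor.query().unwrap() {
    ///     let (io_mem, offset) = cursor.find_iomem_by_paddr(paddr).unwrap();
    ///     assert_eq!(io_mem.paddr() + offset, paddr);
    /// }
    /// ```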
    pub fn find_iomem_by_paddr(&self, paddr: Paddr) -> Option<(IoMem, usize)> {
        self.vmspace.find_iomem_by_paddr(paddr)
    }

    /// Handles a page table fragment that was remapped.
    ///
    /// This method handles the TLB flushing and other cleanup when a mapping
    /// operation results in a fragment being replaced.
    fn handle_remapped_frag(&mut self, frag: PageTableFrag<UserPtConfig>, start_va: Vaddr) {
        match frag {
            PageTableFrag::Mapped { va, item } => {
                debug_assert_eq!(va, start_va);
                match item.mapped_item {
                    MappedItem::TrackedFrame(old_frame) => {
                        self.flusher.issue_tlb_flush_with(
                            TlbFlushOp::for_single(start_va),
                            old_frame.into(),
                        );
                    }
                    MappedItem::UntrackedIoMem { .. } => {
                        // Flush the TLB entry for the current address, but in
                        // the current design, we cannot drop the corresponding
                        // `IoMem`. This is because we manage the range of I/O
                        // as a whole, but the frames handled here might be one
                        // segment of it.
                        self.flusher
                            .issue_tlb_flush(TlbFlushOp::for_single(start_va));
                    }
                }
                self.flusher.dispatch_tlb_flush();
            }
            PageTableFrag::StrayPageTable { .. } => {
                panic!("`UFrame` is base page sized but re-mapping out a child PT");
            }
        }
    }

    /// Clears the mapping starting from the current slot,
    /// and returns the number of unmapped pages.
    ///
    /// This method will bring the cursor forward by `len` bytes in the virtual
    /// address space after the modification.
    ///
    /// Already-absent mappings encountered by the cursor will be skipped. It
    /// is valid to unmap a range that is not mapped.
    ///
    /// This method issues and dispatches a TLB flush after the operation;
    /// otherwise, memory safety would be compromised. To avoid the overhead of
    /// repeated TLB flushes, call this method sparingly: unmapping one large
    /// range with a single call is wiser than splitting the operation into
    /// multiple small ones.
    ///
    /// # Panics
    ///
    /// Panics if:
    ///  - the length is longer than the remaining range of the cursor;
    ///  - the length is not page-aligned.
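    ///
    /// # Example
    ///
    /// A minimal sketch (marked `ignore` since it needs a running kernel) that
    /// unmaps whatever is currently mapped in `0x1000..0x3000`:
    ///
    /// ```rust,ignore
    /// let guard = disable_preempt();
    /// let mut cursor = vm_space.cursor_mut(&guard, &(0x1000..0x3000)).unwrap();
    /// let num_unmapped = cursor.unmap(0x2000);
    /// // Unmapping an already-unmapped range is fine; `num_unmapped` is then 0.
    /// ```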
    pub fn unmap(&mut self, len: usize) -> usize {
        let end_va = self.virt_addr() + len;
        let mut num_unmapped: usize = 0;
        loop {
            // SAFETY: It is safe to un-map memory in the userspace.
            let Some(frag) = (unsafe { self.pt_cursor.take_next(end_va - self.virt_addr()) })
            else {
                break; // No more mappings in the range.
            };

            match frag {
                PageTableFrag::Mapped { va, item, .. } => {
                    match item {
                        VmItem {
                            mapped_item: MappedItem::TrackedFrame(old_frame),
                            ..
                        } => {
                            num_unmapped += 1;
                            self.flusher
                                .issue_tlb_flush_with(TlbFlushOp::for_single(va), old_frame.into());
                        }
                        VmItem {
                            mapped_item: MappedItem::UntrackedIoMem { .. },
                            ..
                        } => {
                            // Flush the TLB entry for the current address, but
                            // in the current design, we cannot drop the
                            // corresponding `IoMem`. This is because we manage
                            // the range of I/O as a whole, but the frames
                            // handled here might be one segment of it.
                            self.flusher.issue_tlb_flush(TlbFlushOp::for_single(va));
                        }
                    }
                }
                PageTableFrag::StrayPageTable {
                    pt,
                    va,
                    len,
                    num_frames,
                } => {
                    num_unmapped += num_frames;
                    self.flusher
                        .issue_tlb_flush_with(TlbFlushOp::for_range(va..va + len), pt);
                }
            }
        }

        self.flusher.dispatch_tlb_flush();

        num_unmapped
    }

    /// Applies the operation to the next slot of mapping within the range.
    ///
    /// The range to be searched is from the current virtual address to the
    /// current address plus the provided length.
    ///
    /// The function stops and yields the actually protected range once it has
    /// protected a page, regardless of whether the following pages also need
    /// to be protected.
    ///
    /// It also moves the cursor forward to the next page after the protected
    /// one. If no mapped pages exist in the following range, the cursor will
    /// stop at the end of the range and return [`None`].
    ///
    /// Note that it will **NOT** flush the TLB after the operation. Please
    /// make the decision yourself on when and how to flush the TLB using
    /// [`Self::flusher`].
    ///
    /// # Panics
    ///
    /// Panics if the length is longer than the remaining range of the cursor.
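    ///
    /// # Example
    ///
    /// A minimal sketch (marked `ignore` since it needs a running kernel) that
    /// write-protects every mapped page in `0x1000..0x3000` and flushes the
    /// TLB afterwards (assuming `PageFlags` supports `-=` via `bitflags`):
    ///
    /// ```rust,ignore
    /// let guard = disable_preempt();
    /// let mut cursor = vm_space.cursor_mut(&guard, &(0x1000..0x3000)).unwrap();
    /// while let Some(range) =
    ///     cursor.protect_next(0x3000 - cursor.virt_addr(), |flags, _| *flags -= PageFlags::W)
    /// {
    ///     cursor.flusher().issue_tlb_flush(TlbFlushOp::for_range(range));
    /// }
    /// cursor.flusher().dispatch_tlb_flush();
    /// ```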
    pub fn protect_next(
        &mut self,
        len: usize,
        mut op: impl FnMut(&mut PageFlags, &mut CachePolicy),
    ) -> Option<Range<Vaddr>> {
        // SAFETY: It is safe to set `PageFlags` and `CachePolicy` of memory
        // in the userspace.
        unsafe {
            self.pt_cursor.protect_next(len, &mut |prop| {
                op(&mut prop.flags, &mut prop.cache);
            })
        }
    }
}

cpu_local_cell! {
    /// The `Arc` pointer to the activated VM space on this CPU. If the pointer
    /// is NULL, it means that the activated page table is merely the kernel
    /// page table.
    // TODO: If we are enabling ASID, we need to maintain the TLB state of each
    // CPU, rather than merely the activated `VmSpace`. When ASID is enabled,
    // the non-active `VmSpace`s can still have their TLB entries in the CPU!
    static ACTIVATED_VM_SPACE: *const VmSpace = core::ptr::null();
}

#[cfg(ktest)]
pub(super) fn get_activated_vm_space() -> *const VmSpace {
    ACTIVATED_VM_SPACE.load()
}

/// The result of a query over the VM space.
#[derive(Debug, Clone, PartialEq)]
pub enum VmQueriedItem {
    /// The current slot is mapped, and the frame within is allocated from
    /// regular physical memory (RAM).
    MappedRam {
        /// The mapped frame.
        frame: UFrame,
        /// The property of the slot.
        prop: PageProperty,
    },
    /// The current slot is mapped, and the memory within is MMIO (I/O)
    /// memory.
    MappedIoMem {
        /// The physical address of the corresponding I/O memory.
        paddr: Paddr,
        /// The property of the slot.
        prop: PageProperty,
    },
}

impl VmQueriedItem {
    /// Returns the page property of the mapped item.
    pub fn prop(&self) -> &PageProperty {
        match self {
            Self::MappedRam { prop, .. } => prop,
            Self::MappedIoMem { prop, .. } => prop,
        }
    }
}

/// Internal representation of a VM item.
///
/// This is kept private to ensure memory safety. The public interface
/// should use `VmQueriedItem` for querying mapping information.
#[derive(Debug, Clone, PartialEq)]
pub(crate) struct VmItem {
    prop: PageProperty,
    mapped_item: MappedItem,
}

#[derive(Debug, Clone, PartialEq)]
enum MappedItem {
    TrackedFrame(UFrame),
    UntrackedIoMem { paddr: Paddr, level: PagingLevel },
}

impl VmItem {
    /// Creates a new `VmItem` that maps a tracked frame.
    pub(super) fn new_tracked(frame: UFrame, prop: PageProperty) -> Self {
        Self {
            prop,
            mapped_item: MappedItem::TrackedFrame(frame),
        }
    }

    /// Creates a new `VmItem` that maps untracked I/O memory.
    fn new_untracked_io(paddr: Paddr, prop: PageProperty) -> Self {
        Self {
            prop,
            mapped_item: MappedItem::UntrackedIoMem { paddr, level: 1 },
        }
    }
}

impl From<VmItem> for VmQueriedItem {
    fn from(item: VmItem) -> Self {
        match item.mapped_item {
            MappedItem::TrackedFrame(frame) => VmQueriedItem::MappedRam {
                frame,
                prop: item.prop,
            },
            MappedItem::UntrackedIoMem { paddr, level } => {
                debug_assert_eq!(level, 1);
                VmQueriedItem::MappedIoMem {
                    paddr,
                    prop: item.prop,
                }
            }
        }
    }
}

#[derive(Clone, Debug)]
pub(crate) struct UserPtConfig {}

// SAFETY: `item_into_raw` and `item_from_raw` are implemented correctly.
unsafe impl PageTableConfig for UserPtConfig {
    const TOP_LEVEL_INDEX_RANGE: Range<usize> = 0..256;

    type E = PageTableEntry;
    type C = PagingConsts;

    type Item = VmItem;

    fn item_into_raw(item: Self::Item) -> (Paddr, PagingLevel, PageProperty) {
        match item.mapped_item {
            MappedItem::TrackedFrame(frame) => {
                let mut prop = item.prop;
                prop.priv_flags -= PrivilegedPageFlags::AVAIL1; // Clear AVAIL1 for tracked frames.
                let level = frame.map_level();
                let paddr = frame.into_raw();
                (paddr, level, prop)
            }
            MappedItem::UntrackedIoMem { paddr, level } => {
                let mut prop = item.prop;
                prop.priv_flags |= PrivilegedPageFlags::AVAIL1; // Set AVAIL1 for I/O memory.
                (paddr, level, prop)
            }
        }
    }

    unsafe fn item_from_raw(paddr: Paddr, level: PagingLevel, prop: PageProperty) -> Self::Item {
        debug_assert_eq!(level, 1);
        if prop.priv_flags.contains(PrivilegedPageFlags::AVAIL1) {
            // AVAIL1 is set, so this is I/O memory.
            VmItem::new_untracked_io(paddr, prop)
        } else {
            // AVAIL1 is clear, so this is tracked memory.
            // SAFETY: The caller ensures safety.
            let frame = unsafe { Frame::<dyn AnyUFrameMeta>::from_raw(paddr) };
            VmItem::new_tracked(frame, prop)
        }
    }
}