ostd/io/io_mem/mod.rs

// SPDX-License-Identifier: MPL-2.0

//! I/O memory and its allocator, which allocates memory-mapped I/O (MMIO) regions to device drivers.

mod allocator;

use core::{
    marker::PhantomData,
    ops::{Deref, Range},
};

use align_ext::AlignExt;

pub(crate) use self::allocator::IoMemAllocatorBuilder;
pub(super) use self::allocator::init;
use crate::{
    Error,
    cpu::{AtomicCpuSet, CpuSet},
    mm::{
        HasPaddr, HasSize, Infallible, PAGE_SIZE, Paddr, PodOnce, VmReader, VmWriter,
        io_util::{HasVmReaderWriter, VmReaderWriterIdentity},
        kspace::kvirt_area::KVirtArea,
        page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
        tlb::{TlbFlushOp, TlbFlusher},
    },
    prelude::*,
    task::disable_preempt,
};

/// A marker type used for [`IoMem`],
/// representing that the underlying MMIO is used for security-sensitive operations.
#[derive(Clone, Debug)]
pub(crate) enum Sensitive {}

/// A marker type used for [`IoMem`],
/// representing that the underlying MMIO is used for security-insensitive operations.
#[derive(Clone, Debug)]
pub enum Insensitive {}

/// I/O memory.
#[derive(Debug, Clone)]
pub struct IoMem<SecuritySensitivity = Insensitive> {
    kvirt_area: Arc<KVirtArea>,
    // The range actually used for MMIO is `kvirt_area.start + offset..kvirt_area.start + offset + limit`.
    offset: usize,
    limit: usize,
    pa: Paddr,
    cache_policy: CachePolicy,
    phantom: PhantomData<SecuritySensitivity>,
}

impl<SecuritySensitivity> IoMem<SecuritySensitivity> {
    /// Slices the `IoMem`, returning another `IoMem` representing the subslice.
    ///
    /// # Panics
    ///
    /// This method will panic if the range is empty or out of bounds.
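    ///
    /// # Examples
    ///
    /// A sketch of carving a 4-byte register out of a larger region (the
    /// offsets are illustrative; `io_mem` is assumed to be an existing `IoMem`
    /// spanning at least 12 bytes):
    ///
    /// ```ignore
    /// let reg = io_mem.slice(8..12);
    /// assert_eq!(reg.size(), 4);
    /// assert_eq!(reg.paddr(), io_mem.paddr() + 8);
    /// ```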
    pub fn slice(&self, range: Range<usize>) -> Self {
        // This ensures `range.start < range.end` and `range.end <= limit`.
        assert!(!range.is_empty() && range.end <= self.limit);

        // We've checked the range is in bounds, so we can construct the new `IoMem` safely.
        Self {
            kvirt_area: self.kvirt_area.clone(),
            offset: self.offset + range.start,
            limit: range.len(),
            pa: self.pa + range.start,
            cache_policy: self.cache_policy,
            phantom: PhantomData,
        }
    }

    /// Creates a new `IoMem`.
    ///
    /// # Safety
    ///
    /// 1. This function must be called after the kernel page table is activated.
    /// 2. The given physical address range must be in the I/O memory region.
    /// 3. Reading from or writing to I/O memory regions may have side effects.
    ///    If `SecuritySensitivity` is `Insensitive`, those side effects must
    ///    not cause soundness problems (e.g., they must not corrupt the kernel
    ///    memory).
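    ///
    /// # Examples
    ///
    /// A hedged sketch of constructing an `IoMem` for a page-aligned MMIO
    /// window (the address range below is illustrative, not taken from any
    /// real platform):
    ///
    /// ```ignore
    /// // SAFETY: The kernel page table is active, the range is I/O memory,
    /// // and accesses to it have no unsound side effects.
    /// let io_mem: IoMem = unsafe {
    ///     IoMem::new(
    ///         0xfed0_0000..0xfed0_1000,
    ///         PageFlags::R | PageFlags::W,
    ///         CachePolicy::Uncacheable,
    ///     )
    /// };
    /// ```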
    pub(crate) unsafe fn new(range: Range<Paddr>, flags: PageFlags, cache: CachePolicy) -> Self {
        let first_page_start = range.start.align_down(PAGE_SIZE);
        let last_page_end = range.end.align_up(PAGE_SIZE);

        let frames_range = first_page_start..last_page_end;
        let area_size = frames_range.len();

        #[cfg(target_arch = "x86_64")]
        let priv_flags = crate::arch::if_tdx_enabled!({
            assert!(
                first_page_start == range.start && last_page_end == range.end,
                "I/O memory that is not page-aligned cannot be unprotected in TDX: {:#x?}..{:#x?}",
                range.start,
                range.end,
            );

            // SAFETY:
            //  - The range `first_page_start..last_page_end` is always page aligned.
            //  - FIXME: We currently do not limit the I/O memory allocator with the maximum GPA,
            //    so the address range may not fall in the GPA limit.
            //  - The caller guarantees that operations on the I/O memory do not have any side
            //    effects that may cause soundness problems, so the pages can safely be viewed as
            //    untyped memory.
            unsafe {
                crate::arch::tdx_guest::unprotect_gpa_tdvm_call(first_page_start, area_size)
                    .unwrap()
            };

            PrivilegedPageFlags::SHARED
        } else {
            PrivilegedPageFlags::empty()
        });
        #[cfg(not(target_arch = "x86_64"))]
        let priv_flags = PrivilegedPageFlags::empty();

        let prop = PageProperty {
            flags,
            cache,
            priv_flags,
        };

        let kva = {
            // SAFETY: The caller of `IoMem::new()` ensures that the given
            // physical address range is I/O memory, so it is safe to map.
            let kva = unsafe { KVirtArea::map_untracked_frames(area_size, 0, frames_range, prop) };

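            // Flush the TLB on all CPUs so that no stale translation for this
            // kernel virtual area (e.g., one left over from a previous
            // mapping) can be used to access the newly mapped MMIO region.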
            let target_cpus = AtomicCpuSet::new(CpuSet::new_full());
            let mut flusher = TlbFlusher::new(&target_cpus, disable_preempt());
            flusher.issue_tlb_flush(TlbFlushOp::for_range(kva.range()));
            flusher.dispatch_tlb_flush();
            flusher.sync_tlb_flush();

            kva
        };

        Self {
            kvirt_area: Arc::new(kva),
            offset: range.start - first_page_start,
            limit: range.len(),
            pa: range.start,
            cache_policy: cache,
            phantom: PhantomData,
        }
    }

    /// Returns the cache policy of this `IoMem`.
    pub fn cache_policy(&self) -> CachePolicy {
        self.cache_policy
    }
}

#[cfg_attr(target_arch = "loongarch64", expect(unused))]
impl IoMem<Sensitive> {
    /// Reads a value of the `PodOnce` type at the specified offset using one
    /// non-tearing memory load.
    ///
    /// Except that the offset is specified explicitly, this method has the
    /// same semantics as [`VmReader::read_once`].
    ///
    /// # Safety
    ///
    /// The caller must ensure that the offset and the read operation are valid,
    /// e.g., follow the specification when used for implementing drivers, do
    /// not cause any out-of-bounds access, and do not cause unsound side
    /// effects (e.g., corrupting the kernel memory).
    pub(crate) unsafe fn read_once<T: PodOnce>(&self, offset: usize) -> T {
        debug_assert!(offset + size_of::<T>() <= self.limit);
        let ptr = (self.kvirt_area.deref().start() + self.offset + offset) as *const T;
        // SAFETY: The safety of the read operation's semantics is upheld by the caller.
        unsafe { core::ptr::read_volatile(ptr) }
    }

    /// Writes a value of the `PodOnce` type at the specified offset using one
    /// non-tearing memory store.
    ///
    /// Except that the offset is specified explicitly, this method has the
    /// same semantics as [`VmWriter::write_once`].
    ///
    /// # Safety
    ///
    /// The caller must ensure that the offset and the write operation are valid,
    /// e.g., follow the specification when used for implementing drivers, do
    /// not cause any out-of-bounds access, and do not cause unsound side
    /// effects (e.g., corrupting the kernel memory).
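    ///
    /// # Examples
    ///
    /// A hedged sketch (the register offset and width are illustrative, not
    /// taken from any real device specification; `regs` is assumed to be an
    /// `IoMem<Sensitive>` and `command` a `u64`):
    ///
    /// ```ignore
    /// // SAFETY: Offset 0x20 is assumed to be a valid, in-bounds 64-bit
    /// // register whose write has no unsound side effects.
    /// unsafe { regs.write_once::<u64>(0x20, &command) };
    /// ```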
    pub(crate) unsafe fn write_once<T: PodOnce>(&self, offset: usize, value: &T) {
        debug_assert!(offset + size_of::<T>() <= self.limit);
        let ptr = (self.kvirt_area.deref().start() + self.offset + offset) as *mut T;
        // SAFETY: The safety of the write operation's semantics is upheld by the caller.
        unsafe { core::ptr::write_volatile(ptr, *value) };
    }
}

impl IoMem<Insensitive> {
    /// Acquires an `IoMem` instance for the given range.
    ///
    /// The I/O memory cache policy is set to uncacheable by default.
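    ///
    /// # Examples
    ///
    /// A hedged sketch (the physical range is illustrative; a real driver
    /// would obtain it from device discovery, e.g., a PCI BAR):
    ///
    /// ```ignore
    /// let io_mem = IoMem::acquire(0xfebf_0000..0xfebf_1000)?;
    /// assert_eq!(io_mem.size(), 0x1000);
    /// ```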
    pub fn acquire(range: Range<Paddr>) -> Result<IoMem<Insensitive>> {
        Self::acquire_with_cache_policy(range, CachePolicy::Uncacheable)
    }

    /// Acquires an `IoMem` instance for the given range with the specified cache policy.
    pub fn acquire_with_cache_policy(
        range: Range<Paddr>,
        cache_policy: CachePolicy,
    ) -> Result<IoMem<Insensitive>> {
        allocator::IO_MEM_ALLOCATOR
            .get()
            .unwrap()
            .acquire(range, cache_policy)
            .ok_or(Error::AccessDenied)
    }
}

// For now, we reuse `VmReader` and `VmWriter` to access I/O memory.
//
// Note that I/O memory is not normal typed or untyped memory. Strictly speaking, it is not
// "memory", but rather I/O ports that communicate directly with the hardware. However, this code
// is in OSTD, so we can rely on the implementation details of `VmReader` and `VmWriter`, which we
// know are also suitable for accessing I/O memory.

impl HasVmReaderWriter for IoMem<Insensitive> {
    type Types = VmReaderWriterIdentity;

    fn reader(&self) -> VmReader<'_, Infallible> {
        // SAFETY: The constructor of the `IoMem` structure has already ensured the
        // safety of reading from the mapped physical address, and the mapping is valid.
        unsafe {
            VmReader::from_kernel_space(
                (self.kvirt_area.deref().start() + self.offset) as *mut u8,
                self.limit,
            )
        }
    }

    fn writer(&self) -> VmWriter<'_, Infallible> {
        // SAFETY: The constructor of the `IoMem` structure has already ensured the
        // safety of writing to the mapped physical address, and the mapping is valid.
        unsafe {
            VmWriter::from_kernel_space(
                (self.kvirt_area.deref().start() + self.offset) as *mut u8,
                self.limit,
            )
        }
    }
}
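
// A hedged usage sketch (not part of the original source): with the
// implementation above, a driver holding an `io_mem` of at least 16 bytes can
// copy a device register block into an ordinary buffer via the `VmReader`
// interface:
//
//     let mut buf = [0u8; 16];
//     let copied = io_mem.reader().read(&mut VmWriter::from(&mut buf[..]));
//     debug_assert_eq!(copied, buf.len());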

impl<SecuritySensitivity> HasPaddr for IoMem<SecuritySensitivity> {
    fn paddr(&self) -> Paddr {
        self.pa
    }
}

impl<SecuritySensitivity> HasSize for IoMem<SecuritySensitivity> {
    fn size(&self) -> usize {
        self.limit
    }
}

impl<SecuritySensitivity> Drop for IoMem<SecuritySensitivity> {
    fn drop(&mut self) {
        // TODO: Multiple `IoMem` instances should not overlap; we should refactor the driver code
        // and remove `Clone` and `IoMem::slice`. After the refactoring, `Drop` can be implemented
        // to recycle the `IoMem`.
    }
}