ostd/io/io_mem/mod.rs
// SPDX-License-Identifier: MPL-2.0

//! I/O memory and its allocator, which allocates memory-mapped I/O (MMIO) regions to device drivers.

mod allocator;

use core::{
    marker::PhantomData,
    ops::{Deref, Range},
};

use align_ext::AlignExt;

pub(crate) use self::allocator::IoMemAllocatorBuilder;
pub(super) use self::allocator::init;
use crate::{
    Error,
    mm::{
        HasPaddr, HasSize, Infallible, PAGE_SIZE, Paddr, PodOnce, VmReader, VmWriter,
        io_util::{HasVmReaderWriter, VmReaderWriterIdentity},
        kspace::kvirt_area::KVirtArea,
        page_prop::{CachePolicy, PageFlags, PageProperty, PrivilegedPageFlags},
    },
    prelude::*,
};

/// A marker type used for [`IoMem`],
/// representing that the underlying MMIO is used for security-sensitive operations.
#[derive(Clone, Debug)]
pub(crate) enum Sensitive {}

/// A marker type used for [`IoMem`],
/// representing that the underlying MMIO is used for security-insensitive operations.
#[derive(Clone, Debug)]
pub enum Insensitive {}

/// I/O memory.
#[derive(Debug, Clone)]
pub struct IoMem<SecuritySensitivity = Insensitive> {
    kvirt_area: Arc<KVirtArea>,
    // The range actually used for MMIO is
    // `kvirt_area.start + offset..kvirt_area.start + offset + limit`.
    offset: usize,
    limit: usize,
    pa: Paddr,
    cache_policy: CachePolicy,
    phantom: PhantomData<SecuritySensitivity>,
}

impl<SecuritySensitivity> IoMem<SecuritySensitivity> {
    /// Slices the `IoMem`, returning another `IoMem` representing the subslice.
    ///
    /// # Panics
    ///
    /// This method will panic if the range is empty or out of bounds.
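    ///
    /// # Examples
    ///
    /// A minimal usage sketch; the physical range and offsets below are
    /// hypothetical and for illustration only:
    ///
    /// ```ignore
    /// let bar = IoMem::acquire(0xfebf_0000..0xfebf_1000)?;
    /// // Narrow the mapping to a 16-byte register block at offset 0x100.
    /// let regs = bar.slice(0x100..0x110);
    /// assert_eq!(regs.size(), 16);
    /// assert_eq!(regs.paddr(), 0xfebf_0100);
    /// ```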
    pub fn slice(&self, range: Range<usize>) -> Self {
        // This ensures `range.start < range.end` and `range.end <= limit`.
        assert!(!range.is_empty() && range.end <= self.limit);

        // We've checked the range is in bounds, so we can construct the new `IoMem` safely.
        Self {
            kvirt_area: self.kvirt_area.clone(),
            offset: self.offset + range.start,
            limit: range.len(),
            pa: self.pa + range.start,
            cache_policy: self.cache_policy,
            phantom: PhantomData,
        }
    }

    /// Creates a new `IoMem`.
    ///
    /// # Safety
    ///
    /// 1. This function must be called after the kernel page table is activated.
    /// 2. The given physical address range must be in the I/O memory region.
    /// 3. Reading from or writing to I/O memory regions may have side effects.
    ///    If `SecuritySensitivity` is `Insensitive`, those side effects must
    ///    not cause soundness problems (e.g., they must not corrupt the kernel
    ///    memory).
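    ///
    /// # Examples
    ///
    /// A hypothetical call site; the range, flags, and cache policy are
    /// illustrative, not taken from a real device:
    ///
    /// ```ignore
    /// // SAFETY: The kernel page table is active, the range is known to be
    /// // MMIO, and accessing it has no unsound side effects.
    /// let io_mem = unsafe {
    ///     IoMem::<Insensitive>::new(
    ///         0xfebf_0000..0xfebf_1000,
    ///         PageFlags::R | PageFlags::W,
    ///         CachePolicy::Uncacheable,
    ///     )
    /// };
    /// ```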
    pub(crate) unsafe fn new(range: Range<Paddr>, flags: PageFlags, cache: CachePolicy) -> Self {
        let first_page_start = range.start.align_down(PAGE_SIZE);
        let last_page_end = range.end.align_up(PAGE_SIZE);

        let frames_range = first_page_start..last_page_end;
        let area_size = frames_range.len();

        #[cfg(target_arch = "x86_64")]
        let priv_flags = crate::arch::if_tdx_enabled!({
            assert!(
                first_page_start == range.start && last_page_end == range.end,
                "I/O memory is not page aligned, which cannot be unprotected in TDX: {:#x?}..{:#x?}",
                range.start,
                range.end,
            );

            let num_pages = area_size / PAGE_SIZE;
            // SAFETY:
            // - The range `first_page_start..last_page_end` is always page aligned.
            // - FIXME: We currently do not limit the I/O memory allocator with the maximum GPA,
            //   so the address range may not fall within the GPA limit.
            // - FIXME: The I/O memory can be at a high address, so it may not be contained in the
            //   linear mapping.
            // - The caller guarantees that operations on the I/O memory do not have any side
            //   effects that may cause soundness problems, so the pages can safely be viewed as
            //   untyped memory.
            unsafe { crate::arch::tdx_guest::unprotect_gpa_range(first_page_start, num_pages).unwrap() };

            PrivilegedPageFlags::SHARED
        } else {
            PrivilegedPageFlags::empty()
        });
        #[cfg(not(target_arch = "x86_64"))]
        let priv_flags = PrivilegedPageFlags::empty();

        let prop = PageProperty {
            flags,
            cache,
            priv_flags,
        };

        // SAFETY: The caller of `IoMem::new()` ensures that the given
        // physical address range is I/O memory, so it is safe to map.
        let kva = unsafe { KVirtArea::map_untracked_frames(area_size, 0, frames_range, prop) };

        Self {
            kvirt_area: Arc::new(kva),
            offset: range.start - first_page_start,
            limit: range.len(),
            pa: range.start,
            cache_policy: cache,
            phantom: PhantomData,
        }
    }

    /// Returns the cache policy of this `IoMem`.
    pub fn cache_policy(&self) -> CachePolicy {
        self.cache_policy
    }
}

#[cfg_attr(target_arch = "loongarch64", expect(unused))]
impl IoMem<Sensitive> {
    /// Reads a value of the `PodOnce` type at the specified offset using one
    /// non-tearing memory load.
    ///
    /// Except that the offset is specified explicitly, the semantics of this
    /// method are the same as [`VmReader::read_once`].
    ///
    /// # Safety
    ///
    /// The caller must ensure that the offset and the read operation are valid,
    /// e.g., that they follow the device specification when used for implementing
    /// drivers, do not cause any out-of-bounds access, and do not cause unsound
    /// side effects (e.g., corrupting the kernel memory).
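    ///
    /// # Examples
    ///
    /// A hypothetical register read (the `io_mem` binding, offset, and
    /// register layout are illustrative):
    ///
    /// ```ignore
    /// // SAFETY: Offset 0x0 holds a 32-bit status register per the device
    /// // specification, and reading it has no unsound side effects.
    /// let status: u32 = unsafe { io_mem.read_once(0x0) };
    /// ```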
    pub(crate) unsafe fn read_once<T: PodOnce>(&self, offset: usize) -> T {
        debug_assert!(offset + size_of::<T>() <= self.limit);
        let ptr = (self.kvirt_area.deref().start() + self.offset + offset) as *const T;
        // SAFETY: The safety of the read operation's semantics is upheld by the caller.
        unsafe { core::ptr::read_volatile(ptr) }
    }

    /// Writes a value of the `PodOnce` type at the specified offset using one
    /// non-tearing memory store.
    ///
    /// Except that the offset is specified explicitly, the semantics of this
    /// method are the same as [`VmWriter::write_once`].
    ///
    /// # Safety
    ///
    /// The caller must ensure that the offset and the write operation are valid,
    /// e.g., that they follow the device specification when used for implementing
    /// drivers, do not cause any out-of-bounds access, and do not cause unsound
    /// side effects (e.g., corrupting the kernel memory).
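    ///
    /// # Examples
    ///
    /// A hypothetical doorbell write (the offset and value are illustrative):
    ///
    /// ```ignore
    /// // SAFETY: Offset 0x20 holds the device's 32-bit doorbell register per
    /// // its specification, and writing it has no unsound side effects.
    /// unsafe { io_mem.write_once(0x20, &1u32) };
    /// ```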
    pub(crate) unsafe fn write_once<T: PodOnce>(&self, offset: usize, value: &T) {
        debug_assert!(offset + size_of::<T>() <= self.limit);
        let ptr = (self.kvirt_area.deref().start() + self.offset + offset) as *mut T;
        // SAFETY: The safety of the write operation's semantics is upheld by the caller.
        unsafe { core::ptr::write_volatile(ptr, *value) };
    }
}

impl IoMem<Insensitive> {
    /// Acquires an `IoMem` instance for the given range.
    ///
    /// The I/O memory cache policy is set to uncacheable by default.
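    ///
    /// # Examples
    ///
    /// A minimal sketch; the physical range is hypothetical:
    ///
    /// ```ignore
    /// let io_mem = IoMem::acquire(0xfebf_0000..0xfebf_1000)?;
    /// let first_word = io_mem.reader().read_once::<u32>()?;
    /// ```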
    pub fn acquire(range: Range<Paddr>) -> Result<IoMem<Insensitive>> {
        Self::acquire_with_cache_policy(range, CachePolicy::Uncacheable)
    }

    /// Acquires an `IoMem` instance for the given range with the specified cache policy.
    pub fn acquire_with_cache_policy(
        range: Range<Paddr>,
        cache_policy: CachePolicy,
    ) -> Result<IoMem<Insensitive>> {
        allocator::IO_MEM_ALLOCATOR
            .get()
            .unwrap()
            .acquire(range, cache_policy)
            .ok_or(Error::AccessDenied)
    }
}

// For now, we reuse `VmReader` and `VmWriter` to access I/O memory.
//
// Note that I/O memory is not normal typed or untyped memory. Strictly speaking, it is not
// "memory", but rather I/O ports that communicate directly with the hardware. However, this code
// is in OSTD, so we can rely on the implementation details of `VmReader` and `VmWriter`, which we
// know are also suitable for accessing I/O memory.

impl HasVmReaderWriter for IoMem<Insensitive> {
    type Types = VmReaderWriterIdentity;

    fn reader(&self) -> VmReader<'_, Infallible> {
        // SAFETY: The constructor of the `IoMem` structure has already ensured the
        // safety of reading from the mapped physical address, and the mapping is valid.
        unsafe {
            VmReader::from_kernel_space(
                (self.kvirt_area.deref().start() + self.offset) as *mut u8,
                self.limit,
            )
        }
    }

    fn writer(&self) -> VmWriter<'_, Infallible> {
        // SAFETY: The constructor of the `IoMem` structure has already ensured the
        // safety of writing to the mapped physical address, and the mapping is valid.
        unsafe {
            VmWriter::from_kernel_space(
                (self.kvirt_area.deref().start() + self.offset) as *mut u8,
                self.limit,
            )
        }
    }
}

impl<SecuritySensitivity> HasPaddr for IoMem<SecuritySensitivity> {
    fn paddr(&self) -> Paddr {
        self.pa
    }
}

impl<SecuritySensitivity> HasSize for IoMem<SecuritySensitivity> {
    fn size(&self) -> usize {
        self.limit
    }
}

impl<SecuritySensitivity> Drop for IoMem<SecuritySensitivity> {
    fn drop(&mut self) {
        // TODO: Multiple `IoMem` instances should not overlap. We should refactor the driver
        // code and remove `Clone` and `IoMem::slice`; after that refactoring, `Drop` can be
        // implemented to recycle the `IoMem`.
    }
}