ostd/mm/dma/dma_stream.rs

// SPDX-License-Identifier: MPL-2.0

use core::{fmt::Debug, marker::PhantomData, mem::ManuallyDrop, ops::Range};

use super::util::{
    alloc_kva, cvm_need_private_protection, prepare_dma, split_daddr, unprepare_dma,
};
use crate::{
    arch::mm::can_sync_dma,
    error::Error,
    mm::{
        Daddr, FrameAllocOptions, HasDaddr, HasPaddr, HasPaddrRange, HasSize, Infallible,
        PAGE_SIZE, Paddr, Split, USegment, VmReader, VmWriter,
        io_util::{HasVmReaderWriter, VmReaderWriterResult},
        kspace::kvirt_area::KVirtArea,
        paddr_to_vaddr,
    },
};

/// [`DmaDirection`] limits the data flow direction of [`DmaStream`] and
/// prevents users from reading from or writing to [`DmaStream`] unexpectedly.
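///
/// For instance, a [`ToDevice`] stream may be written but not read back by
/// the CPU. A minimal sketch (marked `ignore`, so it is not compiled as a
/// doc test):
///
/// ```ignore
/// let dma = DmaStream::<ToDevice>::alloc(1, false)?;
/// assert!(dma.writer().is_ok());
/// assert!(dma.reader().is_err());
/// ```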
pub trait DmaDirection: 'static + Debug + private::Sealed {
    /// Whether the CPU can read data from the device.
    const CAN_READ_FROM_DEVICE: bool;
    /// Whether the CPU can write data to the device.
    const CAN_WRITE_TO_DEVICE: bool;
}

mod private {
    /// Prevents users from implementing `DmaDirection` themselves, which
    /// could trigger unreachable code in functions like
    /// [`crate::arch::mm::sync_dma_range`] or bypass the checks in
    /// [`crate::mm::io_util::HasVmReaderWriter`].
    pub trait Sealed {}
}

/// Data flows to the device.
///
/// From the perspective of the kernel, this memory region is writable.
#[derive(Debug)]
pub enum ToDevice {}

impl private::Sealed for ToDevice {}
impl DmaDirection for ToDevice {
    const CAN_READ_FROM_DEVICE: bool = false;
    const CAN_WRITE_TO_DEVICE: bool = true;
}

/// Data flows from the device.
///
/// From the perspective of the kernel, this memory region is read-only.
#[derive(Debug)]
pub enum FromDevice {}

impl private::Sealed for FromDevice {}
impl DmaDirection for FromDevice {
    const CAN_READ_FROM_DEVICE: bool = true;
    const CAN_WRITE_TO_DEVICE: bool = false;
}

/// Data flows both from and to the device.
#[derive(Debug)]
pub enum FromAndToDevice {}

impl private::Sealed for FromAndToDevice {}
impl DmaDirection for FromAndToDevice {
    const CAN_READ_FROM_DEVICE: bool = true;
    const CAN_WRITE_TO_DEVICE: bool = true;
}

/// A DMA memory object with streaming access.
///
/// The kernel must synchronize the data by [`sync_from_device`]/[`sync_to_device`]
/// when interacting with the device.
///
/// [`sync_from_device`]: DmaStream::sync_from_device
/// [`sync_to_device`]: DmaStream::sync_to_device
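///
/// # Examples
///
/// A minimal round-trip sketch (marked `ignore`, so it is not compiled as a
/// doc test); the device-side interaction is elided and only indicated by
/// comments:
///
/// ```ignore
/// let dma = DmaStream::<FromAndToDevice>::alloc(1, false)?;
///
/// // The CPU fills the buffer, then makes the update visible to the device.
/// dma.writer()?.fill_zeros(dma.size());
/// dma.sync_to_device(0..dma.size())?;
///
/// // ... program the device with `dma.daddr()` and wait for it to finish ...
///
/// // Make the device's updates visible to the CPU before reading them.
/// dma.sync_from_device(0..dma.size())?;
/// let mut reader = dma.reader()?;
/// ```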
#[derive(Debug)]
pub struct DmaStream<D: DmaDirection = FromAndToDevice> {
    inner: Inner,
    map_daddr: Option<Daddr>,
    is_cache_coherent: bool,
    _phantom: PhantomData<D>,
}

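/// The memory that backs a [`DmaStream`].
///
/// - `Segment`: the linearly-mapped [`USegment`] is used for DMA directly.
/// - `Kva`: a dedicated kernel virtual area (with its backing physical
///   address) is used for DMA.
/// - `Both`: the original [`USegment`] is kept, and a kernel virtual area
///   serves as a bounce buffer; syncing copies data between the two.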
#[derive(Debug)]
enum Inner {
    Segment(USegment),
    Kva(KVirtArea, Paddr),
    Both(KVirtArea, Paddr, USegment),
}

impl<D: DmaDirection> DmaStream<D> {
    /// Allocates a region of physical memory for streaming DMA access.
    ///
    /// The memory of the newly-allocated DMA buffer is initialized to zeros.
    /// This method is only available when `D` is [`ToDevice`] or
    /// [`FromAndToDevice`], as zeroing requires write access to the buffer.
    ///
    /// The `is_cache_coherent` argument specifies whether the target device
    /// that the DMA mapping is prepared for can access the main memory in a
    /// CPU cache coherent way or not.
    ///
    /// # Comparison with [`DmaStream::map`]
    ///
    /// This method is semantically equivalent to allocating a [`USegment`] via
    /// [`FrameAllocOptions::alloc_segment`] and then mapping it with
    /// [`DmaStream::map`]. However, [`DmaStream::alloc`] combines these two
    /// operations and can be more efficient in certain scenarios, particularly
    /// in confidential VMs, where the overhead of bounce buffers can be
    /// avoided.
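    ///
    /// For example, the following two streams are semantically equivalent
    /// (a sketch, marked `ignore` so it is not compiled as a doc test):
    ///
    /// ```ignore
    /// let a = DmaStream::<FromAndToDevice>::alloc(4, false)?;
    ///
    /// let segment: USegment = FrameAllocOptions::new().alloc_segment(4)?.into();
    /// let b = DmaStream::<FromAndToDevice>::map(segment, false)?;
    /// ```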
    pub fn alloc(nframes: usize, is_cache_coherent: bool) -> Result<Self, Error> {
        const { assert!(D::CAN_WRITE_TO_DEVICE) };

        Self::alloc_uninit(nframes, is_cache_coherent).and_then(|dma| {
            dma.writer()?.fill_zeros(dma.size());
            Ok(dma)
        })
    }

    /// Allocates a region of physical memory for streaming DMA access
    /// without initialization.
    ///
    /// This method is the same as [`DmaStream::alloc`], except that it skips
    /// zeroing the memory of the newly-allocated DMA region.
    pub fn alloc_uninit(nframes: usize, is_cache_coherent: bool) -> Result<Self, Error> {
        let cvm = cvm_need_private_protection();

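        // Use the segment's linear mapping directly when the device is cache
        // coherent (or the architecture can synchronize caches for DMA) and
        // no confidential-VM protection is needed; otherwise, back the buffer
        // with a dedicated kernel virtual area.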
        let (inner, paddr_range) = if (can_sync_dma() || is_cache_coherent) && !cvm {
            let segment: USegment = FrameAllocOptions::new()
                .zeroed(false)
                .alloc_segment(nframes)?
                .into();
            let paddr_range = segment.paddr_range();

            (Inner::Segment(segment), paddr_range)
        } else {
            let (kva, paddr) = alloc_kva(nframes, can_sync_dma() || is_cache_coherent)?;

            (Inner::Kva(kva, paddr), paddr..paddr + nframes * PAGE_SIZE)
        };

        // SAFETY: The physical address range is untyped DMA memory before `drop`.
        let map_daddr = unsafe { prepare_dma(&paddr_range) };

        Ok(Self {
            inner,
            map_daddr,
            is_cache_coherent,
            _phantom: PhantomData,
        })
    }

    /// Establishes a DMA stream mapping for a given [`USegment`].
    ///
    /// The `is_cache_coherent` argument specifies whether the target device
    /// that the DMA mapping is prepared for can access the main memory in a
    /// CPU cache coherent way or not.
    pub fn map(segment: USegment, is_cache_coherent: bool) -> Result<Self, Error> {
        let cvm = cvm_need_private_protection();
        let size = segment.size();

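        // If the segment's own memory can be used for DMA, map it directly;
        // otherwise, allocate a kernel-virtual bounce buffer that syncs with
        // the segment by copying.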
        let (inner, paddr) = if (can_sync_dma() || is_cache_coherent) && !cvm {
            let paddr = segment.paddr();

            (Inner::Segment(segment), paddr)
        } else {
            let (kva, paddr) = alloc_kva(size / PAGE_SIZE, is_cache_coherent)?;

            (Inner::Both(kva, paddr, segment), paddr)
        };

        let paddr_range = paddr..paddr + size;

        // SAFETY: The physical address range is untyped DMA memory before `drop`.
        let map_daddr = unsafe { prepare_dma(&paddr_range) };

        Ok(Self {
            inner,
            map_daddr,
            is_cache_coherent,
            _phantom: PhantomData,
        })
    }

    /// Synchronizes the streaming DMA mapping data from the device.
    ///
    /// This method should be called when the data of the streaming DMA mapping
    /// has been updated by the device side. Before the CPU side starts to read
    /// (e.g., using [`read_bytes`]), it must call the [`Self::sync_from_device`]
    /// method first.
    ///
    /// [`read_bytes`]: crate::mm::VmIo::read_bytes
    pub fn sync_from_device(&self, byte_range: Range<usize>) -> Result<(), Error> {
        const { assert!(D::CAN_READ_FROM_DEVICE) };

        self.sync_impl(byte_range, true)
    }

    /// Synchronizes the streaming DMA mapping data to the device.
    ///
    /// This method should be called when the data of the streaming DMA mapping
    /// has been updated by the CPU side (e.g., using [`write_bytes`]). Before
    /// the CPU side notifies the device side to read, it must call the
    /// [`Self::sync_to_device`] method first.
    ///
    /// [`write_bytes`]: crate::mm::VmIo::write_bytes
    pub fn sync_to_device(&self, byte_range: Range<usize>) -> Result<(), Error> {
        const { assert!(D::CAN_WRITE_TO_DEVICE) };

        self.sync_impl(byte_range, false)
    }

    fn sync_impl(&self, byte_range: Range<usize>, is_from_device: bool) -> Result<(), Error> {
        let size = self.size();
        if byte_range.end > size || byte_range.start > size {
            return Err(Error::InvalidArgs);
        }
        if self.is_cache_coherent {
            return Ok(());
        }

        let va_range = match &self.inner {
            Inner::Segment(segment) => {
                let pa_range = segment.paddr_range();
                paddr_to_vaddr(pa_range.start)..paddr_to_vaddr(pa_range.end)
            }
            Inner::Kva(kva, _) => {
                if !can_sync_dma() {
                    // The KVA is mapped as uncachable.
                    return Ok(());
                }
                kva.range()
            }
            Inner::Both(kva, _, seg) => {
                self.sync_via_copying(byte_range, is_from_device, seg, kva);
                return Ok(());
            }
        };
        let range = va_range.start + byte_range.start..va_range.start + byte_range.end;

        // SAFETY: We've checked that the range is in bounds, so the virtual
        // address range and the DMA direction correspond to a DMA region
        // (they're part of `self`).
        unsafe { crate::arch::mm::sync_dma_range::<D>(range) };

        Ok(())
    }

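    /// Synchronizes the `Both` variant by copying between the original
    /// segment and the kernel-virtual bounce buffer.
    ///
    /// When syncing from the device, bytes are copied from the bounce buffer
    /// into the segment; when syncing to the device, the copy goes the other
    /// way.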
    fn sync_via_copying(
        &self,
        byte_range: Range<usize>,
        is_from_device: bool,
        seg: &USegment,
        kva: &KVirtArea,
    ) {
        let skip = byte_range.start;
        let limit = byte_range.len();

        let (mut reader, mut writer) = if is_from_device {
            // SAFETY:
            //  - The memory range points to untyped memory.
            //  - The KVA is alive in this scope.
            //  - Using `VmReader` and `VmWriter` is the only way to access the KVA.
            let kva_reader =
                unsafe { VmReader::from_kernel_space(kva.start() as *const u8, kva.size()) };

            (kva_reader, seg.writer())
        } else {
            // SAFETY:
            //  - The memory range points to untyped memory.
            //  - The KVA is alive in this scope.
            //  - Using `VmReader` and `VmWriter` is the only way to access the KVA.
            let kva_writer =
                unsafe { VmWriter::from_kernel_space(kva.start() as *mut u8, kva.size()) };

            (seg.reader(), kva_writer)
        };

        writer
            .skip(skip)
            .limit(limit)
            .write(reader.skip(skip).limit(limit));
    }
}

impl<D: DmaDirection> Split for DmaStream<D> {
    fn split(self, offset: usize) -> (Self, Self) {
        assert!(offset.is_multiple_of(PAGE_SIZE));
        assert!(0 < offset && offset < self.size());

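        // Take ownership of the fields without running `Drop`, which would
        // otherwise unmap the DMA range via `unprepare_dma` too early.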
        let (inner, map_daddr, is_cache_coherent) = {
            let this = ManuallyDrop::new(self);
            (
                // SAFETY: `this.inner` will never be used or dropped later.
                unsafe { core::ptr::read(&this.inner as *const Inner) },
                this.map_daddr,
                this.is_cache_coherent,
            )
        };

        let (inner1, inner2) = match inner {
            Inner::Segment(segment) => {
                let (s1, s2) = segment.split(offset);
                (Inner::Segment(s1), Inner::Segment(s2))
            }
            Inner::Kva(kva, paddr) => {
                let (kva1, kva2) = kva.split(offset);
                let (paddr1, paddr2) = (paddr, paddr + offset);
                (Inner::Kva(kva1, paddr1), Inner::Kva(kva2, paddr2))
            }
            Inner::Both(kva, paddr, segment) => {
                let (kva1, kva2) = kva.split(offset);
                let (paddr1, paddr2) = (paddr, paddr + offset);
                let (s1, s2) = segment.split(offset);
                (Inner::Both(kva1, paddr1, s1), Inner::Both(kva2, paddr2, s2))
            }
        };

        let (daddr1, daddr2) = split_daddr(map_daddr, offset);

        (
            Self {
                inner: inner1,
                map_daddr: daddr1,
                is_cache_coherent,
                _phantom: PhantomData,
            },
            Self {
                inner: inner2,
                map_daddr: daddr2,
                is_cache_coherent,
                _phantom: PhantomData,
            },
        )
    }
}

impl<D: DmaDirection> Drop for DmaStream<D> {
    fn drop(&mut self) {
        // SAFETY: The physical address range was prepared in `alloc_uninit` or `map`.
        unsafe { unprepare_dma(&self.paddr_range(), self.map_daddr) };
    }
}

impl<D: DmaDirection> HasPaddr for DmaStream<D> {
    fn paddr(&self) -> Paddr {
        match &self.inner {
            Inner::Segment(segment) => segment.paddr(),
            // The PA of the DMA-mapped memory (for `Both`, this is not the
            // original segment's PA).
            Inner::Kva(_, paddr) | Inner::Both(_, paddr, _) => *paddr,
        }
    }
}

impl<D: DmaDirection> HasDaddr for DmaStream<D> {
    fn daddr(&self) -> Daddr {
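        // Fall back to the physical address when `prepare_dma` did not return
        // a separate device-visible address.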
        self.map_daddr.unwrap_or_else(|| self.paddr() as Daddr)
    }
}

impl<D: DmaDirection> HasSize for DmaStream<D> {
    fn size(&self) -> usize {
        match &self.inner {
            Inner::Segment(segment) => segment.size(),
            Inner::Kva(kva, _) => kva.size(),
            Inner::Both(kva, _, segment) => {
                debug_assert_eq!(kva.size(), segment.size());
                kva.size()
            }
        }
    }
}

impl<D: DmaDirection> HasVmReaderWriter for DmaStream<D> {
    type Types = VmReaderWriterResult;

    fn reader(&self) -> Result<VmReader<'_, Infallible>, Error> {
        if !D::CAN_READ_FROM_DEVICE {
            return Err(Error::AccessDenied);
        }
        match &self.inner {
            Inner::Segment(seg) | Inner::Both(_, _, seg) => Ok(seg.reader()),
            Inner::Kva(kva, _) => {
                // SAFETY:
                //  - Although the memory range points to typed memory, the range is for DMA
                //    and the access is not by linear mapping.
                //  - The KVA is alive during the lifetime `'_`.
                //  - Using `VmReader` and `VmWriter` is the only way to access the KVA.
                unsafe {
                    Ok(VmReader::from_kernel_space(
                        kva.start() as *const u8,
                        kva.size(),
                    ))
                }
            }
        }
    }

    fn writer(&self) -> Result<VmWriter<'_, Infallible>, Error> {
        if !D::CAN_WRITE_TO_DEVICE {
            return Err(Error::AccessDenied);
        }
        match &self.inner {
            Inner::Segment(seg) | Inner::Both(_, _, seg) => Ok(seg.writer()),
            Inner::Kva(kva, _) => {
                // SAFETY:
                //  - Although the memory range points to typed memory, the range is for DMA
                //    and the access is not by linear mapping.
                //  - The KVA is alive during the lifetime `'_`.
                //  - Using `VmReader` and `VmWriter` is the only way to access the KVA.
                unsafe {
                    Ok(VmWriter::from_kernel_space(
                        kva.start() as *mut u8,
                        kva.size(),
                    ))
                }
            }
        }
    }
}