ostd/mm/dma/
dma_stream.rs

// SPDX-License-Identifier: MPL-2.0

use core::{fmt::Debug, marker::PhantomData, mem::ManuallyDrop, ops::Range};

use super::util::{
    alloc_kva, cvm_need_private_protection, prepare_dma, split_daddr, unprepare_dma,
};
use crate::{
    arch::mm::can_sync_dma,
    error::Error,
    mm::{
        Daddr, FrameAllocOptions, HasDaddr, HasPaddr, HasPaddrRange, HasSize, Infallible,
        PAGE_SIZE, Paddr, Split, USegment, VmReader, VmWriter,
        io_util::{HasVmReaderWriter, VmReaderWriterResult},
        kspace::kvirt_area::KVirtArea,
        paddr_to_vaddr,
    },
};

/// [`DmaDirection`] limits the data flow direction of a [`DmaStream`] and
/// prevents users from unexpectedly reading from or writing to a [`DmaStream`].
pub trait DmaDirection: 'static + Debug + private::Sealed {
    /// Whether the CPU can read data from the device.
    const CAN_READ_FROM_DEVICE: bool;
    /// Whether the CPU can write data to the device.
    const CAN_WRITE_TO_DEVICE: bool;
}

mod private {
    /// Prevents users from implementing `DmaDirection`, which could otherwise
    /// trigger unreachable code in functions like
    /// [`crate::arch::mm::sync_dma_range`] or bypass checks in
    /// [`crate::mm::io_util::HasVmReaderWriter`].
    pub trait Sealed {}
}

/// Data flows to the device.
///
/// From the perspective of the kernel, this memory region is writable.
#[derive(Debug)]
pub enum ToDevice {}

impl private::Sealed for ToDevice {}
impl DmaDirection for ToDevice {
    const CAN_READ_FROM_DEVICE: bool = false;
    const CAN_WRITE_TO_DEVICE: bool = true;
}

/// Data flows from the device.
///
/// From the perspective of the kernel, this memory region is read-only.
#[derive(Debug)]
pub enum FromDevice {}

impl private::Sealed for FromDevice {}
impl DmaDirection for FromDevice {
    const CAN_READ_FROM_DEVICE: bool = true;
    const CAN_WRITE_TO_DEVICE: bool = false;
}

/// Data flows both from and to the device.
#[derive(Debug)]
pub enum FromAndToDevice {}

impl private::Sealed for FromAndToDevice {}
impl DmaDirection for FromAndToDevice {
    const CAN_READ_FROM_DEVICE: bool = true;
    const CAN_WRITE_TO_DEVICE: bool = true;
}
/// A DMA memory object with streaming access.
///
/// The kernel must synchronize the data by [`sync_from_device`]/[`sync_to_device`]
/// when interacting with the device.
///
/// [`sync_from_device`]: DmaStream::sync_from_device
/// [`sync_to_device`]: DmaStream::sync_to_device
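///
/// # Examples
///
/// An illustrative round trip, as a sketch: it assumes a kernel context in
/// which the allocation succeeds and a device that reads from and writes to
/// the buffer at the address given by `daddr`.
///
/// ```ignore
/// let stream = DmaStream::<FromAndToDevice>::alloc(1, false)?;
///
/// // CPU fills the buffer, then makes the data visible to the device.
/// stream.writer()?.write_val(&42u32)?;
/// stream.sync_to_device(0..4)?;
///
/// // ... the (hypothetical) device processes the request and replies in place ...
///
/// // Make the device's writes visible to the CPU before reading.
/// stream.sync_from_device(0..4)?;
/// let reply = stream.reader()?.read_val::<u32>()?;
/// ```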
#[derive(Debug)]
pub struct DmaStream<D: DmaDirection = FromAndToDevice> {
    inner: Inner,
    map_daddr: Option<Daddr>,
    is_cache_coherent: bool,
    _phantom: PhantomData<D>,
}

#[derive(Debug)]
enum Inner {
    /// The device accesses the segment's memory directly.
    Segment(USegment),
    /// The buffer lives in a dedicated kernel virtual area whose backing
    /// memory (at the given physical address) is what the device accesses.
    Kva(KVirtArea, Paddr),
    /// The kernel virtual area serves as a bounce buffer that the device
    /// accesses, while the CPU accesses the caller-provided segment; the two
    /// are kept in sync by copying.
    Both(KVirtArea, Paddr, USegment),
}

impl<D: DmaDirection> DmaStream<D> {
    /// Allocates a region of physical memory for streaming DMA access.
    ///
    /// The memory of the newly-allocated DMA buffer is initialized to zeros.
    /// This method is only available when `D` is [`ToDevice`] or
    /// [`FromAndToDevice`], as zeroing requires write access to the buffer.
    ///
    /// The `is_cache_coherent` argument specifies whether the target device
    /// that the DMA mapping is prepared for can access the main memory in a
    /// CPU-cache-coherent way.
    ///
    /// # Comparison with [`DmaStream::map`]
    ///
    /// This method is semantically equivalent to allocating a [`USegment`] via
    /// [`FrameAllocOptions::alloc_segment`] and then mapping it with
    /// [`DmaStream::map`]. However, [`DmaStream::alloc`] combines these two
    /// operations and can be more efficient in certain scenarios, particularly
    /// in confidential VMs, where the overhead of bounce buffers can be
    /// avoided.
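    ///
    /// # Examples
    ///
    /// A minimal sketch (it assumes a kernel context in which the allocation
    /// succeeds):
    ///
    /// ```ignore
    /// // One zero-filled page, writable by the CPU, holding device-bound data.
    /// let stream = DmaStream::<ToDevice>::alloc(1, false)?;
    /// assert_eq!(stream.size(), PAGE_SIZE);
    ///
    /// // `DmaStream::<FromDevice>::alloc(1, false)` would not compile:
    /// // zeroing requires `CAN_WRITE_TO_DEVICE`.
    /// ```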
    pub fn alloc(nframes: usize, is_cache_coherent: bool) -> Result<Self, Error> {
        const { assert!(D::CAN_WRITE_TO_DEVICE) };

        Self::alloc_uninit(nframes, is_cache_coherent).and_then(|dma| {
            dma.writer()?.fill_zeros(dma.size());
            Ok(dma)
        })
    }

    /// Allocates a region of physical memory for streaming DMA access
    /// without initialization.
    ///
    /// This method is the same as [`DmaStream::alloc`], except that it skips
    /// zeroing the memory of the newly-allocated DMA region.
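    ///
    /// # Examples
    ///
    /// A sketch of the intended pattern: the caller overwrites the whole
    /// buffer before the device ever reads it.
    ///
    /// ```ignore
    /// let stream = DmaStream::<ToDevice>::alloc_uninit(1, false)?;
    /// // Write the real payload over the entire buffer (zeros stand in here).
    /// stream.writer()?.fill_zeros(stream.size());
    /// stream.sync_to_device(0..stream.size())?;
    /// ```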
    pub fn alloc_uninit(nframes: usize, is_cache_coherent: bool) -> Result<Self, Error> {
        let cvm = cvm_need_private_protection();

        let (inner, paddr_range) = if (can_sync_dma() || is_cache_coherent) && !cvm {
            let segment: USegment = FrameAllocOptions::new()
                .zeroed(false)
                .alloc_segment(nframes)?
                .into();
            let paddr_range = segment.paddr_range();

            (Inner::Segment(segment), paddr_range)
        } else {
            let (kva, paddr) = alloc_kva(nframes, can_sync_dma() || is_cache_coherent)?;

            (Inner::Kva(kva, paddr), paddr..paddr + nframes * PAGE_SIZE)
        };

        // SAFETY: The physical address range is untyped DMA memory before `drop`.
        let map_daddr = unsafe { prepare_dma(&paddr_range) };

        Ok(Self {
            inner,
            map_daddr,
            is_cache_coherent,
            _phantom: PhantomData,
        })
    }

    /// Establishes a streaming DMA mapping for a given [`USegment`].
    ///
    /// The `is_cache_coherent` argument specifies whether the target device
    /// that the DMA mapping is prepared for can access the main memory in a
    /// CPU-cache-coherent way.
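    ///
    /// # Examples
    ///
    /// A minimal sketch (it assumes a kernel context in which the allocation
    /// succeeds):
    ///
    /// ```ignore
    /// let segment: USegment = FrameAllocOptions::new().alloc_segment(1)?.into();
    /// let stream = DmaStream::<FromAndToDevice>::map(segment, false)?;
    /// // Program the device with `stream.daddr()`, not the segment's `paddr()`.
    /// ```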
    pub fn map(segment: USegment, is_cache_coherent: bool) -> Result<Self, Error> {
        let cvm = cvm_need_private_protection();
        let size = segment.size();

        let (inner, paddr) = if (can_sync_dma() || is_cache_coherent) && !cvm {
            let paddr = segment.paddr();

            (Inner::Segment(segment), paddr)
        } else {
            let (kva, paddr) = alloc_kva(size / PAGE_SIZE, is_cache_coherent)?;

            (Inner::Both(kva, paddr, segment), paddr)
        };

        let paddr_range = paddr..paddr + size;

        // SAFETY: The physical address range is untyped DMA memory before `drop`.
        let map_daddr = unsafe { prepare_dma(&paddr_range) };

        Ok(Self {
            inner,
            map_daddr,
            is_cache_coherent,
            _phantom: PhantomData,
        })
    }

    /// Synchronizes the streaming DMA mapping data from the device.
    ///
    /// This method should be called when the data of the streaming DMA mapping
    /// has been updated by the device side. Before the CPU side starts to read
    /// (e.g., using [`read_bytes`]), it must call the [`Self::sync_from_device`]
    /// method first.
    ///
    /// [`read_bytes`]: crate::mm::VmIo::read_bytes
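    ///
    /// # Examples
    ///
    /// A sketch of the receive path, assuming a `stream` into which the device
    /// has just finished writing `len` bytes:
    ///
    /// ```ignore
    /// stream.sync_from_device(0..len)?;
    /// let mut reader = stream.reader()?;
    /// // ... the first `len` bytes read via `reader` are now up to date ...
    /// ```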
    pub fn sync_from_device(&self, byte_range: Range<usize>) -> Result<(), Error> {
        const { assert!(D::CAN_READ_FROM_DEVICE) };

        self.sync_impl(byte_range, true)
    }

    /// Synchronizes the streaming DMA mapping data to the device.
    ///
    /// This method should be called when the data of the streaming DMA mapping
    /// has been updated by the CPU side (e.g., using [`write_bytes`]). Before
    /// the CPU side notifies the device side to read, it must call the
    /// [`Self::sync_to_device`] method first.
    ///
    /// [`write_bytes`]: crate::mm::VmIo::write_bytes
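    ///
    /// # Examples
    ///
    /// A sketch of the transmit path, assuming a `stream` into which the CPU
    /// has just written `len` bytes (e.g., via `writer`):
    ///
    /// ```ignore
    /// stream.sync_to_device(0..len)?;
    /// // ... now it is safe to let the device read the first `len` bytes ...
    /// ```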
    pub fn sync_to_device(&self, byte_range: Range<usize>) -> Result<(), Error> {
        const { assert!(D::CAN_WRITE_TO_DEVICE) };

        self.sync_impl(byte_range, false)
    }

    fn sync_impl(&self, byte_range: Range<usize>, is_from_device: bool) -> Result<(), Error> {
        let size = self.size();
        if byte_range.end > size || byte_range.start > size {
            return Err(Error::InvalidArgs);
        }
        if self.is_cache_coherent {
            return Ok(());
        }

        let va_range = match &self.inner {
            Inner::Segment(segment) => {
                let pa_range = segment.paddr_range();
                paddr_to_vaddr(pa_range.start)..paddr_to_vaddr(pa_range.end)
            }
            Inner::Kva(kva, _) => {
                if !can_sync_dma() {
                    // The KVA is mapped as uncacheable.
                    return Ok(());
                }
                kva.range()
            }
            Inner::Both(kva, _, seg) => {
                self.sync_via_copying(byte_range, is_from_device, seg, kva);
                return Ok(());
            }
        };
        let range = va_range.start + byte_range.start..va_range.start + byte_range.end;

        // SAFETY:
        // 1. We've checked that the range is in bounds, so the virtual address
        //    range and the DMA direction correspond to a DMA region (they're
        //    part of `self`).
        // 2. `can_sync_dma()` is either checked above (for `Inner::Kva`) or
        //    checked when constructing `self` (for `Inner::Segment`).
        unsafe { crate::arch::mm::sync_dma_range::<D>(range) };

        Ok(())
    }

    /// Synchronizes data by copying between the CPU-side segment and the
    /// device-side bounce buffer (the KVA).
    fn sync_via_copying(
        &self,
        byte_range: Range<usize>,
        is_from_device: bool,
        seg: &USegment,
        kva: &KVirtArea,
    ) {
        let skip = byte_range.start;
        let limit = byte_range.len();

        let (mut reader, mut writer) = if is_from_device {
            // SAFETY:
            //  - The memory range points to untyped memory.
            //  - The KVA is alive in this scope.
            //  - Using `VmReader` and `VmWriter` is the only way to access the KVA.
            let kva_reader =
                unsafe { VmReader::from_kernel_space(kva.start() as *const u8, kva.size()) };

            (kva_reader, seg.writer())
        } else {
            // SAFETY:
            //  - The memory range points to untyped memory.
            //  - The KVA is alive in this scope.
            //  - Using `VmReader` and `VmWriter` is the only way to access the KVA.
            let kva_writer =
                unsafe { VmWriter::from_kernel_space(kva.start() as *mut u8, kva.size()) };

            (seg.reader(), kva_writer)
        };

        writer
            .skip(skip)
            .limit(limit)
            .write(reader.skip(skip).limit(limit));
    }
}

impl<D: DmaDirection> Split for DmaStream<D> {
    fn split(self, offset: usize) -> (Self, Self) {
        assert!(offset.is_multiple_of(PAGE_SIZE));
        assert!(0 < offset && offset < self.size());

        let (inner, map_daddr, is_cache_coherent) = {
            let this = ManuallyDrop::new(self);
            (
                // SAFETY: `this.inner` will never be used or dropped later.
                unsafe { core::ptr::read(&this.inner as *const Inner) },
                this.map_daddr,
                this.is_cache_coherent,
            )
        };

        let (inner1, inner2) = match inner {
            Inner::Segment(segment) => {
                let (s1, s2) = segment.split(offset);
                (Inner::Segment(s1), Inner::Segment(s2))
            }
            Inner::Kva(kva, paddr) => {
                let (kva1, kva2) = kva.split(offset);
                let (paddr1, paddr2) = (paddr, paddr + offset);
                (Inner::Kva(kva1, paddr1), Inner::Kva(kva2, paddr2))
            }
            Inner::Both(kva, paddr, segment) => {
                let (kva1, kva2) = kva.split(offset);
                let (paddr1, paddr2) = (paddr, paddr + offset);
                let (s1, s2) = segment.split(offset);
                (Inner::Both(kva1, paddr1, s1), Inner::Both(kva2, paddr2, s2))
            }
        };

        let (daddr1, daddr2) = split_daddr(map_daddr, offset);

        (
            Self {
                inner: inner1,
                map_daddr: daddr1,
                is_cache_coherent,
                _phantom: PhantomData,
            },
            Self {
                inner: inner2,
                map_daddr: daddr2,
                is_cache_coherent,
                _phantom: PhantomData,
            },
        )
    }
}

impl<D: DmaDirection> Drop for DmaStream<D> {
    fn drop(&mut self) {
        // SAFETY: The physical address range was prepared in `alloc_uninit` or `map`.
        unsafe { unprepare_dma(&self.paddr_range(), self.map_daddr) };
    }
}

impl<D: DmaDirection> HasPaddr for DmaStream<D> {
    fn paddr(&self) -> Paddr {
        match &self.inner {
            Inner::Segment(segment) => segment.paddr(),
            // For the KVA-backed variants, this is the DMA-mapped (bounce
            // buffer) PA, not the PA of any caller-provided segment.
            Inner::Kva(_, paddr) | Inner::Both(_, paddr, _) => *paddr,
        }
    }
}

impl<D: DmaDirection> HasDaddr for DmaStream<D> {
    fn daddr(&self) -> Daddr {
        self.map_daddr.unwrap_or_else(|| self.paddr() as Daddr)
    }
}

impl<D: DmaDirection> HasSize for DmaStream<D> {
    fn size(&self) -> usize {
        match &self.inner {
            Inner::Segment(segment) => segment.size(),
            Inner::Kva(kva, _) => kva.size(),
            Inner::Both(kva, _, segment) => {
                debug_assert_eq!(kva.size(), segment.size());
                kva.size()
            }
        }
    }
}

impl<D: DmaDirection> HasVmReaderWriter for DmaStream<D> {
    type Types = VmReaderWriterResult;

    fn reader(&self) -> Result<VmReader<'_, Infallible>, Error> {
        if !D::CAN_READ_FROM_DEVICE {
            return Err(Error::AccessDenied);
        }
        match &self.inner {
            Inner::Segment(seg) | Inner::Both(_, _, seg) => Ok(seg.reader()),
            Inner::Kva(kva, _) => {
                // SAFETY:
                //  - Although the memory range points to typed memory, the range is for DMA
                //    and the access is not by linear mapping.
                //  - The KVA is alive during the lifetime `'_`.
                //  - Using `VmReader` and `VmWriter` is the only way to access the KVA.
                unsafe {
                    Ok(VmReader::from_kernel_space(
                        kva.start() as *const u8,
                        kva.size(),
                    ))
                }
            }
        }
    }

    fn writer(&self) -> Result<VmWriter<'_, Infallible>, Error> {
        if !D::CAN_WRITE_TO_DEVICE {
            return Err(Error::AccessDenied);
        }
        match &self.inner {
            Inner::Segment(seg) | Inner::Both(_, _, seg) => Ok(seg.writer()),
            Inner::Kva(kva, _) => {
                // SAFETY:
                //  - Although the memory range points to typed memory, the range is for DMA
                //    and the access is not by linear mapping.
                //  - The KVA is alive during the lifetime `'_`.
                //  - Using `VmReader` and `VmWriter` is the only way to access the KVA.
                unsafe {
                    Ok(VmWriter::from_kernel_space(
                        kva.start() as *mut u8,
                        kva.size(),
                    ))
                }
            }
        }
    }
}