ostd/mm/dma/dma_stream.rs

// SPDX-License-Identifier: MPL-2.0

#![cfg_attr(
    any(target_arch = "riscv64", target_arch = "loongarch64"),
    allow(unfulfilled_lint_expectations)
)]

use core::ops::Range;

use super::{DmaError, check_and_insert_dma_mapping, remove_dma_mapping};
use crate::{
    arch::iommu,
    error::Error,
    mm::{
        HasDaddr, HasPaddr, HasSize, Infallible, PAGE_SIZE, Paddr, USegment, VmReader, VmWriter,
        dma::{Daddr, DmaType, dma_type},
        io_util::{HasVmReaderWriter, VmReaderWriterResult},
    },
};

/// A streaming DMA mapping.
///
/// Users must synchronize data before reading or after writing to ensure
/// consistency.
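///
/// # Example
///
/// A minimal usage sketch (marked `ignore` since it requires a running
/// kernel; it assumes a `USegment` has already been allocated elsewhere and
/// that the size and reader/writer traits are in scope):
///
/// ```ignore
/// use ostd::mm::dma::{DmaDirection, DmaStream};
///
/// // Map a segment so that the device can write into it.
/// let stream = DmaStream::map(segment, DmaDirection::FromDevice, false)?;
/// // ... tell the device to fill the buffer and wait for completion ...
/// // Synchronize before the CPU reads what the device wrote.
/// stream.sync(0..stream.size())?;
/// let mut reader = stream.reader()?;
/// ```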
#[derive(Debug)]
pub struct DmaStream {
    segment: USegment,
    start_daddr: Daddr,
    is_cache_coherent: bool,
    direction: DmaDirection,
}

/// `DmaDirection` limits the data flow direction of a [`DmaStream`] and
/// prevents users from reading from or writing to it unexpectedly.
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum DmaDirection {
    /// Data flows to the device.
    ToDevice,
    /// Data flows from the device.
    FromDevice,
    /// Data flows both from and to the device.
    Bidirectional,
}

impl DmaStream {
    /// Establishes a streaming DMA mapping for a given [`USegment`].
    ///
    /// This method fails if the segment already belongs to a DMA mapping.
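    ///
    /// A sketch (`ignore`d since it needs a running kernel; `segment` is
    /// assumed to be a freshly allocated [`USegment`]):
    ///
    /// ```ignore
    /// // Fails with `DmaError::AlreadyMapped` if any frame of `segment` is
    /// // already part of another DMA mapping.
    /// let stream = DmaStream::map(segment, DmaDirection::ToDevice, false)?;
    /// ```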
    pub fn map(
        segment: USegment,
        direction: DmaDirection,
        is_cache_coherent: bool,
    ) -> Result<Self, DmaError> {
        let paddr = segment.paddr();
        let frame_count = segment.size() / PAGE_SIZE;

        if !check_and_insert_dma_mapping(paddr, frame_count) {
            return Err(DmaError::AlreadyMapped);
        }

        let start_daddr = match dma_type() {
            DmaType::Direct => {
                #[cfg(target_arch = "x86_64")]
                crate::arch::if_tdx_enabled!({
                    // SAFETY:
                    //  - The address of a `USegment` is always page aligned.
                    //  - A `USegment` always points to normal physical memory, so the address
                    //    range falls in the GPA limit.
                    //  - A `USegment` always points to normal physical memory, so all the pages
                    //    are contained in the linear mapping.
                    //  - The pages belong to a `USegment`, so they're all untyped memory.
                    unsafe {
                        crate::arch::tdx_guest::unprotect_gpa_range(paddr, frame_count).unwrap();
                    }
                });
                paddr as Daddr
            }
            DmaType::Iommu => {
                for i in 0..frame_count {
                    let paddr = paddr + (i * PAGE_SIZE);
                    // SAFETY: `paddr` is derived from the `paddr` and `frame_count` of the
                    // `segment`, so it falls within the segment's physical range.
                    unsafe {
                        iommu::map(paddr as Daddr, paddr).unwrap();
                    }
                }
                paddr as Daddr
            }
        };

        Ok(Self {
            segment,
            start_daddr,
            is_cache_coherent,
            direction,
        })
    }

    /// Gets the underlying [`USegment`].
    ///
    /// Usually, the CPU side should not access the memory after the DMA
    /// mapping is established, because the device may be updating it
    /// concurrently. Do this at your own risk.
    pub fn segment(&self) -> &USegment {
        &self.segment
    }

    /// Returns the DMA direction.
    pub fn direction(&self) -> DmaDirection {
        self.direction
    }

    /// Synchronizes the streaming DMA mapping with the device.
    ///
    /// This method should be called under one of the two conditions:
    /// 1. The data of the streaming DMA mapping has been updated by the device side.
    ///    The CPU side needs to call the `sync` method before reading data (e.g., using [`read_bytes`]).
    /// 2. The data of the streaming DMA mapping has been updated by the CPU side
    ///    (e.g., using [`write_bytes`]).
    ///    Before the CPU side notifies the device side to read, it must call the `sync` method first.
    ///
    /// [`read_bytes`]: crate::mm::VmIo::read_bytes
    /// [`write_bytes`]: crate::mm::VmIo::write_bytes
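    ///
    /// A sketch of both cases (`ignore`d; `stream` is assumed to be mapped
    /// with `DmaDirection::Bidirectional`):
    ///
    /// ```ignore
    /// // Case 2: the CPU writes, then syncs before the device reads.
    /// stream.write_bytes(0, &data)?;
    /// stream.sync(0..data.len())?;
    /// // ... notify the device to read the buffer ...
    ///
    /// // Case 1: the device wrote, so sync before the CPU reads.
    /// stream.sync(0..data.len())?;
    /// stream.read_bytes(0, &mut data)?;
    /// ```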
    pub fn sync(&self, byte_range: Range<usize>) -> Result<(), Error> {
        let size = self.size();
        if byte_range.end > size || byte_range.start > size {
            return Err(Error::InvalidArgs);
        }

        if self.is_cache_coherent {
            return Ok(());
        }

        let start_vaddr = crate::mm::paddr_to_vaddr(self.segment.paddr());
        let range = (start_vaddr + byte_range.start)..(start_vaddr + byte_range.end);
        // SAFETY: We've checked that the range is in bounds, so the virtual address range and
        // the DMA direction correspond to a DMA region (they're part of `self`).
        unsafe { crate::arch::mm::sync_dma_range(range, self.direction) };

        Ok(())
    }
}

impl HasDaddr for DmaStream {
    fn daddr(&self) -> Daddr {
        self.start_daddr
    }
}

impl Drop for DmaStream {
    fn drop(&mut self) {
        let paddr = self.segment.paddr();
        let frame_count = self.segment.size() / PAGE_SIZE;

        match dma_type() {
            DmaType::Direct => {
                #[cfg(target_arch = "x86_64")]
                crate::arch::if_tdx_enabled!({
                    // SAFETY:
                    //  - The address of a `USegment` is always page aligned.
                    //  - A `USegment` always points to normal physical memory, so the address
                    //    range falls in the GPA limit.
                    //  - A `USegment` always points to normal physical memory, so all the pages
                    //    are contained in the linear mapping.
                    //  - The pages belong to a `USegment`, so they're all untyped memory.
                    unsafe {
                        crate::arch::tdx_guest::protect_gpa_range(paddr, frame_count).unwrap();
                    }
                });
            }
            DmaType::Iommu => {
                for i in 0..frame_count {
                    let paddr = paddr + (i * PAGE_SIZE);
                    iommu::unmap(paddr as Daddr).unwrap();
                    // FIXME: The frames could be reused after being dropped, so the IOTLB
                    // needs to be flushed.
                }
            }
        }

        remove_dma_mapping(paddr, frame_count);
    }
}

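// CPU-side access is restricted by the data flow direction: a `ToDevice`
// mapping cannot be read by the CPU, and a `FromDevice` mapping cannot be
// written by the CPU.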
impl HasVmReaderWriter for DmaStream {
    type Types = VmReaderWriterResult;

    fn reader(&self) -> Result<VmReader<'_, Infallible>, Error> {
        if self.direction == DmaDirection::ToDevice {
            return Err(Error::AccessDenied);
        }
        Ok(self.segment.reader())
    }

    fn writer(&self) -> Result<VmWriter<'_, Infallible>, Error> {
        if self.direction == DmaDirection::FromDevice {
            return Err(Error::AccessDenied);
        }
        Ok(self.segment.writer())
    }
}

impl HasPaddr for DmaStream {
    fn paddr(&self) -> Paddr {
        self.segment.paddr()
    }
}

impl HasSize for DmaStream {
    fn size(&self) -> usize {
        self.segment.size()
    }
}