ostd/mm/dma/dma_coherent.rs

// SPDX-License-Identifier: MPL-2.0

use core::ops::Deref;

use cfg_if::cfg_if;

use super::{DmaError, check_and_insert_dma_mapping, remove_dma_mapping};
use crate::{
    arch::iommu,
    mm::{
        HasDaddr, HasPaddr, HasSize, Infallible, PAGE_SIZE, Paddr, USegment, VmReader, VmWriter,
        dma::{Daddr, DmaType, dma_type},
        io_util::{HasVmReaderWriter, VmReaderWriterIdentity},
        kspace::{KERNEL_PAGE_TABLE, paddr_to_vaddr},
        page_prop::CachePolicy,
    },
};

cfg_if! {
    if #[cfg(all(target_arch = "x86_64", feature = "cvm_guest"))] {
        use crate::arch::tdx_guest;
    }
}

/// A coherent (or consistent) DMA mapping,
/// which guarantees that the device and the CPU can
/// access the data in parallel without explicit
/// cache synchronization.
///
/// The mapping will be destroyed automatically when
/// the object is dropped.
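///
/// A `DmaCoherent` dereferences to its underlying [`USegment`], so the CPU
/// side can access the mapped memory through the segment's `reader`/`writer`
/// interface, while the device addresses it via [`HasDaddr::daddr`].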
#[derive(Debug)]
pub struct DmaCoherent {
    segment: USegment,
    start_daddr: Daddr,
    is_cache_coherent: bool,
}

impl DmaCoherent {
    /// Creates a coherent DMA mapping backed by `segment`.
    ///
    /// The `is_cache_coherent` argument specifies whether
    /// the target device that the DMA mapping is prepared for
    /// can access the main memory in a CPU-cache-coherent way.
    ///
    /// The method fails if any part of the given `segment`
    /// already belongs to a DMA mapping.
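    ///
    /// # Examples
    ///
    /// A minimal usage sketch. The segment-allocation call
    /// (`FrameAllocOptions::new().alloc_segment(1)`) and its `into`
    /// conversion to `USegment` are assumptions for illustration and may
    /// differ from the actual allocation API:
    ///
    /// ```ignore
    /// use ostd::mm::{FrameAllocOptions, HasDaddr, HasVmReaderWriter};
    ///
    /// // Allocate a one-frame untyped segment to back the mapping (assumed API).
    /// let segment: USegment = FrameAllocOptions::new().alloc_segment(1)?.into();
    ///
    /// // Map it for a device that is *not* cache coherent; the kernel mapping
    /// // of the backing pages is switched to `CachePolicy::Uncacheable`.
    /// let dma = DmaCoherent::map(segment, false)?;
    ///
    /// // The CPU reads and writes through the segment's reader/writer...
    /// dma.writer().write_val(&0u32)?;
    /// // ...while the device uses the device-visible address.
    /// let device_addr = dma.daddr();
    /// ```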
    pub fn map(segment: USegment, is_cache_coherent: bool) -> core::result::Result<Self, DmaError> {
        let paddr = segment.paddr();
        let frame_count = segment.size() / PAGE_SIZE;

        if !check_and_insert_dma_mapping(paddr, frame_count) {
            return Err(DmaError::AlreadyMapped);
        }

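        // For a non-coherent device, remap the pages in the kernel linear
        // mapping as uncacheable, so that CPU accesses bypass the cache and
        // stay consistent with the device's view of memory.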
        if !is_cache_coherent {
            let page_table = KERNEL_PAGE_TABLE.get().unwrap();
            let vaddr = paddr_to_vaddr(paddr);
            let va_range = vaddr..vaddr + (frame_count * PAGE_SIZE);
            // SAFETY: The physical mapping is only used for DMA, so protecting it is safe.
            unsafe {
                page_table
                    .protect_flush_tlb(&va_range, |p| p.cache = CachePolicy::Uncacheable)
                    .unwrap();
            }
        }

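        // Compute the device-visible address of the mapping. With direct DMA,
        // the device address equals the physical address; with an IOMMU, each
        // frame is identity-mapped (daddr == paddr) in the IOMMU page table.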
        let start_daddr = match dma_type() {
            DmaType::Direct => {
                #[cfg(target_arch = "x86_64")]
                crate::arch::if_tdx_enabled!({
                    // SAFETY:
                    //  - The address of a `USegment` is always page aligned.
                    //  - A `USegment` always points to normal physical memory, so the address
                    //    range falls in the GPA limit.
                    //  - A `USegment` always points to normal physical memory, so all the pages
                    //    are contained in the linear mapping.
                    //  - The pages belong to a `USegment`, so they're all untyped memory.
                    unsafe {
                        tdx_guest::unprotect_gpa_range(paddr, frame_count).unwrap();
                    }
                });
                paddr as Daddr
            }
            DmaType::Iommu => {
                for i in 0..frame_count {
                    let paddr = paddr + (i * PAGE_SIZE);
                    // SAFETY: The `paddr` stays within the physical range of `segment`,
                    // since it is derived from the segment's `paddr` and `frame_count`.
                    unsafe {
                        iommu::map(paddr as Daddr, paddr).unwrap();
                    }
                }
                paddr as Daddr
            }
        };

        Ok(Self {
            segment,
            start_daddr,
            is_cache_coherent,
        })
    }
}

impl Deref for DmaCoherent {
    type Target = USegment;
    fn deref(&self) -> &Self::Target {
        &self.segment
    }
}

impl Drop for DmaCoherent {
    fn drop(&mut self) {
        let paddr = self.segment.paddr();
        let frame_count = self.segment.size() / PAGE_SIZE;

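        // Undo what `map` established: restore TDX page protection or remove
        // the IOMMU mappings, revert the cache policy, and release the range
        // from the global DMA mapping table.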
        match dma_type() {
            DmaType::Direct => {
                #[cfg(target_arch = "x86_64")]
                crate::arch::if_tdx_enabled!({
                    // SAFETY:
                    //  - The address of a `USegment` is always page aligned.
                    //  - A `USegment` always points to normal physical memory, so the address
                    //    range falls in the GPA limit.
                    //  - A `USegment` always points to normal physical memory, so all the pages
                    //    are contained in the linear mapping.
                    //  - The pages belong to a `USegment`, so they're all untyped memory.
                    unsafe {
                        tdx_guest::protect_gpa_range(paddr, frame_count).unwrap();
                    }
                });
            }
            DmaType::Iommu => {
                for i in 0..frame_count {
                    let paddr = paddr + (i * PAGE_SIZE);
                    iommu::unmap(paddr as Daddr).unwrap();
                    // FIXME: The frames could be reused after being unmapped here,
                    // so the corresponding IOTLB entries need to be flushed.
                }
            }
        }

        if !self.is_cache_coherent {
            let page_table = KERNEL_PAGE_TABLE.get().unwrap();
            let vaddr = paddr_to_vaddr(paddr);
            let va_range = vaddr..vaddr + (frame_count * PAGE_SIZE);
            // SAFETY: The physical mapping is only used for DMA, so protecting it is safe.
            unsafe {
                page_table
                    .protect_flush_tlb(&va_range, |p| p.cache = CachePolicy::Writeback)
                    .unwrap();
            }
        }

        remove_dma_mapping(paddr, frame_count);
    }
}

impl HasPaddr for DmaCoherent {
    fn paddr(&self) -> Paddr {
        self.segment.paddr()
    }
}

impl HasSize for DmaCoherent {
    fn size(&self) -> usize {
        self.segment.size()
    }
}

impl HasDaddr for DmaCoherent {
    fn daddr(&self) -> Daddr {
        self.start_daddr
    }
}

impl HasVmReaderWriter for DmaCoherent {
    type Types = VmReaderWriterIdentity;

    fn reader(&self) -> VmReader<'_, Infallible> {
        self.segment.reader()
    }

    fn writer(&self) -> VmWriter<'_, Infallible> {
        self.segment.writer()
    }
}