ostd/mm/dma/dma_coherent.rs

// SPDX-License-Identifier: MPL-2.0

use core::{fmt::Debug, mem::ManuallyDrop};

use super::util::{
    alloc_kva, cvm_need_private_protection, prepare_dma, split_daddr, unprepare_dma,
};
use crate::{
    error::Error,
    mm::{
        Daddr, FrameAllocOptions, HasDaddr, HasPaddr, HasPaddrRange, HasSize, Infallible,
        PAGE_SIZE, Paddr, Segment, Split, VmReader, VmWriter,
        io_util::{HasVmReaderWriter, VmReaderWriterIdentity},
        kspace::kvirt_area::KVirtArea,
    },
};

/// A DMA memory object that can be accessed in a cache-coherent manner.
///
/// Users need not manually synchronize the CPU cache and the device when
/// accessing the memory region with [`VmReader`] and [`VmWriter`]. If the
/// device does not support cache-coherent access, the memory region will be
/// mapped with caching disabled.
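///
/// # Examples
///
/// A minimal usage sketch (illustrative only, not compiled as a doc test; it
/// assumes a kernel context where frame allocation succeeds and that
/// `read_val`/`write_val` are available on the returned reader and writer):
///
/// ```rust,ignore
/// // Allocate one zero-initialized page for a cache-coherent device.
/// let dma = DmaCoherent::alloc(1, true)?;
///
/// // Write through the writer; no manual cache synchronization is needed.
/// dma.writer().write_val(&0xdead_beef_u32)?;
///
/// // Read the value back through the reader.
/// let val: u32 = dma.reader().read_val()?;
/// assert_eq!(val, 0xdead_beef_u32);
/// ```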
#[derive(Debug)]
pub struct DmaCoherent {
    inner: Inner,
    map_daddr: Option<Daddr>,
    is_cache_coherent: bool,
}

#[derive(Debug)]
enum Inner {
    Segment(Segment<()>),
    Kva(KVirtArea, Paddr),
}

impl DmaCoherent {
    /// Allocates a region of physical memory for coherent DMA access.
    ///
    /// The memory of the newly-allocated DMA buffer is initialized to zeros.
    ///
    /// The `is_cache_coherent` argument specifies whether the target device
    /// that the DMA mapping is prepared for can access the main memory in a
    /// CPU cache-coherent way.
    pub fn alloc(nframes: usize, is_cache_coherent: bool) -> Result<Self, Error> {
        Self::alloc_uninit(nframes, is_cache_coherent).inspect(|dma| {
            dma.writer().fill_zeros(dma.size());
        })
    }

    /// Allocates a region of physical memory for coherent DMA access
    /// without initialization.
    ///
    /// This method is the same as [`DmaCoherent::alloc`],
    /// except that it skips zeroing the memory of the newly-allocated DMA region.
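    ///
    /// # Examples
    ///
    /// A minimal sketch (illustrative, not compiled as a doc test): because
    /// the memory is uninitialized, fill or overwrite it before reading it
    /// back or handing it to the device.
    ///
    /// ```rust,ignore
    /// let dma = DmaCoherent::alloc_uninit(1, true)?;
    /// dma.writer().fill_zeros(dma.size());
    /// ```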
    pub fn alloc_uninit(nframes: usize, is_cache_coherent: bool) -> Result<Self, Error> {
        let cvm = cvm_need_private_protection();

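        // A cache-coherent buffer outside a confidential VM can be backed by
        // plain frames. Otherwise, the frames must be remapped in a dedicated
        // kernel virtual area with the appropriate caching and protection
        // attributes.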
        let (inner, paddr_range) = if is_cache_coherent && !cvm {
            let segment = FrameAllocOptions::new()
                .zeroed(false)
                .alloc_segment(nframes)?;
            let paddr_range = segment.paddr_range();

            (Inner::Segment(segment), paddr_range)
        } else {
            let (kva, paddr) = alloc_kva(nframes, is_cache_coherent)?;

            (Inner::Kva(kva, paddr), paddr..paddr + nframes * PAGE_SIZE)
        };

        // SAFETY: The physical address range is untyped DMA memory before `drop`.
        let map_daddr = unsafe { prepare_dma(&paddr_range) };

        Ok(Self {
            inner,
            map_daddr,
            is_cache_coherent,
        })
    }
}

impl Split for DmaCoherent {
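    /// Splits this coherent DMA object into two at the given byte `offset`.
    ///
    /// Both halves keep the cache-coherence attribute and, if present, the
    /// device-address mapping of the original object.
    ///
    /// # Panics
    ///
    /// Panics if `offset` is not a multiple of [`PAGE_SIZE`] or does not lie
    /// strictly within the object.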
    fn split(self, offset: usize) -> (Self, Self) {
        assert!(offset.is_multiple_of(PAGE_SIZE));
        assert!(0 < offset && offset < self.size());

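        // Move the fields out of `self` without running its destructor:
        // dropping `self` here would call `unprepare_dma` and tear down the
        // DMA mapping that both halves still rely on.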
        let (inner, map_daddr, is_cache_coherent) = {
            let this = ManuallyDrop::new(self);
            (
                // SAFETY: `this.inner` will never be used or dropped later.
                unsafe { core::ptr::read(&this.inner as *const Inner) },
                this.map_daddr,
                this.is_cache_coherent,
            )
        };

        let (inner1, inner2) = match inner {
            Inner::Segment(segment) => {
                let (s1, s2) = segment.split(offset);
                (Inner::Segment(s1), Inner::Segment(s2))
            }
            Inner::Kva(kva, paddr) => {
                let (kva1, kva2) = kva.split(offset);
                let (paddr1, paddr2) = (paddr, paddr + offset);
                (Inner::Kva(kva1, paddr1), Inner::Kva(kva2, paddr2))
            }
        };

        let (daddr1, daddr2) = split_daddr(map_daddr, offset);

        (
            Self {
                inner: inner1,
                map_daddr: daddr1,
                is_cache_coherent,
            },
            Self {
                inner: inner2,
                map_daddr: daddr2,
                is_cache_coherent,
            },
        )
    }
}

impl Drop for DmaCoherent {
    fn drop(&mut self) {
        // SAFETY: The physical address range was prepared in `alloc_uninit`.
        unsafe { unprepare_dma(&self.paddr_range(), self.map_daddr) };
    }
}

impl HasPaddr for DmaCoherent {
    fn paddr(&self) -> Paddr {
        match &self.inner {
            Inner::Segment(segment) => segment.paddr(),
            Inner::Kva(_, paddr) => *paddr,
        }
    }
}

impl HasDaddr for DmaCoherent {
    fn daddr(&self) -> Daddr {
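        // When no device-side mapping was created, the device address falls
        // back to the physical address.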
        self.map_daddr.unwrap_or_else(|| self.paddr() as Daddr)
    }
}

impl HasSize for DmaCoherent {
    fn size(&self) -> usize {
        match &self.inner {
            Inner::Segment(segment) => segment.size(),
            Inner::Kva(kva, _) => kva.size(),
        }
    }
}

impl HasVmReaderWriter for DmaCoherent {
    type Types = VmReaderWriterIdentity;

    fn reader(&self) -> VmReader<'_, Infallible> {
        match &self.inner {
            Inner::Segment(seg) => seg.reader(),
            Inner::Kva(kva, _) => {
                // SAFETY:
                //  - The memory range points to untyped memory.
                //  - The KVA is alive during the lifetime `'_`.
                //  - Using `VmReader` and `VmWriter` is the only way to access the KVA.
                unsafe { VmReader::from_kernel_space(kva.start() as *const u8, kva.size()) }
            }
        }
    }

    fn writer(&self) -> VmWriter<'_, Infallible> {
        match &self.inner {
            Inner::Segment(seg) => seg.writer(),
            Inner::Kva(kva, _) => {
                // SAFETY:
                //  - The memory range points to untyped memory.
                //  - The KVA is alive during the lifetime `'_`.
                //  - Using `VmReader` and `VmWriter` is the only way to access the KVA.
                unsafe { VmWriter::from_kernel_space(kva.start() as *mut u8, kva.size()) }
            }
        }
    }
}