// Path: ostd/mm/dma/dma_coherent.rs

// SPDX-License-Identifier: MPL-2.0

3use core::{fmt::Debug, mem::ManuallyDrop};
4
5use super::util::{
6    alloc_kva, cvm_need_private_protection, prepare_dma, split_daddr, unprepare_dma,
7};
8use crate::{
9    arch::irq,
10    error::Error,
11    mm::{
12        Daddr, FrameAllocOptions, HasDaddr, HasPaddr, HasPaddrRange, HasSize, Infallible,
13        PAGE_SIZE, Paddr, Segment, Split, VmReader, VmWriter,
14        io::util::{HasVmReaderWriter, VmReaderWriterIdentity},
15        kspace::kvirt_area::KVirtArea,
16    },
17};
18
19/// A DMA memory object that can be accessed in a cache-coherent manner.
20///
21/// The users need not manually synchronize the CPU cache and the device when
22/// accessing the memory region with [`VmReader`] and [`VmWriter`]. If the
23/// device doesn't not support cache-coherent access, the memory region will be
24/// mapped without caching enabled.
25///
26/// For whether the associated methods can be used in IRQs, refer to
27/// [module-level docs](crate::mm::dma#usage-in-irqs).
28#[derive(Debug)]
29pub struct DmaCoherent {
30    inner: Inner,
31    map_daddr: Option<Daddr>,
32    is_cache_coherent: bool,
33}
34
35#[derive(Debug)]
36enum Inner {
37    Segment(Segment<()>),
38    Kva(KVirtArea, Paddr),
39}
40
41impl DmaCoherent {
42    /// Allocates a region of physical memory for coherent DMA access.
43    ///
44    /// The memory of the newly-allocated DMA buffer is initialized to zeros.
45    ///
46    /// The `is_cache_coherent` argument specifies whether the target device
47    /// that the DMA mapping is prepared for can access the main memory in a
48    /// CPU cache coherent way or not.
49    ///
50    /// This method [requires](crate::mm::dma#usage-in-irqs) the caller to
51    /// have IRQs enabled.
52    pub fn alloc(nframes: usize, is_cache_coherent: bool) -> Result<Self, Error> {
53        Self::alloc_uninit(nframes, is_cache_coherent).inspect(|dma| {
54            dma.writer().fill_zeros(dma.size());
55        })
56    }
57
58    /// Allocates a region of physical memory for coherent DMA access
59    /// without initialization.
60    ///
61    /// This method is the same as [`DmaCoherent::alloc`]
62    /// except that it skips zeroing the memory of newly-allocated DMA region.
63    ///
64    /// This method [requires](crate::mm::dma#usage-in-irqs) the caller to
65    /// have IRQs enabled.
66    pub fn alloc_uninit(nframes: usize, is_cache_coherent: bool) -> Result<Self, Error> {
67        debug_assert!(irq::is_local_enabled());
68
69        let cvm = cvm_need_private_protection();
70
71        let (inner, paddr_range) = if is_cache_coherent && !cvm {
72            let segment = FrameAllocOptions::new()
73                .zeroed(false)
74                .alloc_segment(nframes)?;
75            let paddr_range = segment.paddr_range();
76
77            (Inner::Segment(segment), paddr_range)
78        } else {
79            let (kva, paddr) = alloc_kva(nframes, is_cache_coherent)?;
80
81            (Inner::Kva(kva, paddr), paddr..paddr + nframes * PAGE_SIZE)
82        };
83
84        // SAFETY: The physical address range is untyped DMA memory before `drop`.
85        let map_daddr = unsafe { prepare_dma(&paddr_range) };
86
87        Ok(Self {
88            inner,
89            map_daddr,
90            is_cache_coherent,
91        })
92    }
93}
94
95impl Split for DmaCoherent {
96    fn split(self, offset: usize) -> (Self, Self) {
97        assert!(offset.is_multiple_of(PAGE_SIZE));
98        assert!(0 < offset && offset < self.size());
99
100        let (inner, map_daddr, is_cache_coherent) = {
101            let this = ManuallyDrop::new(self);
102            (
103                // SAFETY: `this.inner` will never be used or dropped later.
104                unsafe { core::ptr::read(&this.inner as *const Inner) },
105                this.map_daddr,
106                this.is_cache_coherent,
107            )
108        };
109
110        let (inner1, inner2) = match inner {
111            Inner::Segment(segment) => {
112                let (s1, s2) = segment.split(offset);
113                (Inner::Segment(s1), Inner::Segment(s2))
114            }
115            Inner::Kva(kva, paddr) => {
116                let (kva1, kva2) = kva.split(offset);
117                let (paddr1, paddr2) = (paddr, paddr + offset);
118                (Inner::Kva(kva1, paddr1), Inner::Kva(kva2, paddr2))
119            }
120        };
121
122        let (daddr1, daddr2) = split_daddr(map_daddr, offset);
123
124        (
125            Self {
126                inner: inner1,
127                map_daddr: daddr1,
128                is_cache_coherent,
129            },
130            Self {
131                inner: inner2,
132                map_daddr: daddr2,
133                is_cache_coherent,
134            },
135        )
136    }
137}
138
139impl Drop for DmaCoherent {
140    fn drop(&mut self) {
141        // SAFETY: The physical address range was prepared in `alloc`.
142        unsafe { unprepare_dma(&self.paddr_range(), self.map_daddr) };
143    }
144}
145
146impl HasPaddr for DmaCoherent {
147    fn paddr(&self) -> Paddr {
148        match &self.inner {
149            Inner::Segment(segment) => segment.paddr(),
150            Inner::Kva(_, paddr) => *paddr,
151        }
152    }
153}
154
155impl HasDaddr for DmaCoherent {
156    fn daddr(&self) -> Daddr {
157        self.map_daddr.unwrap_or_else(|| self.paddr() as Daddr)
158    }
159}
160
161impl HasSize for DmaCoherent {
162    fn size(&self) -> usize {
163        match &self.inner {
164            Inner::Segment(segment) => segment.size(),
165            Inner::Kva(kva, _) => kva.size(),
166        }
167    }
168}
169
170impl HasVmReaderWriter for DmaCoherent {
171    type Types = VmReaderWriterIdentity;
172
173    fn reader(&self) -> VmReader<'_, Infallible> {
174        match &self.inner {
175            Inner::Segment(seg) => seg.reader(),
176            Inner::Kva(kva, _) => {
177                // SAFETY:
178                //  - The memory range points to untyped memory.
179                //  - The KVA is alive during the lifetime `'_`.
180                //  - Using `VmReader` and `VmWriter` is the only way to access the KVA.
181                unsafe { VmReader::from_kernel_space(kva.start() as *const u8, kva.size()) }
182            }
183        }
184    }
185
186    fn writer(&self) -> VmWriter<'_, Infallible> {
187        match &self.inner {
188            Inner::Segment(seg) => seg.writer(),
189            Inner::Kva(kva, _) => {
190                // SAFETY:
191                //  - The memory range points to untyped memory.
192                //  - The KVA is alive during the lifetime `'_`.
193                //  - Using `VmReader` and `VmWriter` is the only way to access the KVA.
194                unsafe { VmWriter::from_kernel_space(kva.start() as *mut u8, kva.size()) }
195            }
196        }
197    }
198}