use core::{fmt::Debug, marker::PhantomData, mem::ManuallyDrop, ops::Range};

use super::util::{
    alloc_kva, cvm_need_private_protection, prepare_dma, split_daddr, unprepare_dma,
};
use crate::{
    arch::mm::can_sync_dma,
    error::Error,
    mm::{
        Daddr, FrameAllocOptions, HasDaddr, HasPaddr, HasPaddrRange, HasSize, Infallible,
        PAGE_SIZE, Paddr, Split, USegment, VmReader, VmWriter,
        io_util::{HasVmReaderWriter, VmReaderWriterResult},
        kspace::kvirt_area::KVirtArea,
        paddr_to_vaddr,
    },
};

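/// A marker type describing the direction of a streaming DMA mapping.
///
/// The associated constants state whether data may flow from the device to
/// memory (`CAN_READ_FROM_DEVICE`) and from memory to the device
/// (`CAN_WRITE_TO_DEVICE`). The trait is sealed, so only the directions
/// defined in this module can implement it.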
pub trait DmaDirection: 'static + Debug + private::Sealed {
    const CAN_READ_FROM_DEVICE: bool;
    const CAN_WRITE_TO_DEVICE: bool;
}

mod private {
    pub trait Sealed {}
}

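/// A DMA direction in which data flows from memory to the device only.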
#[derive(Debug)]
pub enum ToDevice {}

impl private::Sealed for ToDevice {}
impl DmaDirection for ToDevice {
    const CAN_READ_FROM_DEVICE: bool = false;
    const CAN_WRITE_TO_DEVICE: bool = true;
}

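/// A DMA direction in which data flows from the device to memory only.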
#[derive(Debug)]
pub enum FromDevice {}

impl private::Sealed for FromDevice {}
impl DmaDirection for FromDevice {
    const CAN_READ_FROM_DEVICE: bool = true;
    const CAN_WRITE_TO_DEVICE: bool = false;
}

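/// A DMA direction in which data may flow both ways between memory and the
/// device.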
#[derive(Debug)]
pub enum FromAndToDevice {}

impl private::Sealed for FromAndToDevice {}
impl DmaDirection for FromAndToDevice {
    const CAN_READ_FROM_DEVICE: bool = true;
    const CAN_WRITE_TO_DEVICE: bool = true;
}

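/// A streaming DMA mapping over one or more physical frames.
///
/// A `DmaStream` owns the memory that backs the mapping. Unless the mapping
/// is cache coherent, the CPU must call [`DmaStream::sync_from_device`]
/// before reading data written by the device, and [`DmaStream::sync_to_device`]
/// after writing data to be read by the device.
///
/// A minimal usage sketch (the device programming itself is omitted):
///
/// ```ignore
/// // Allocate a one-page, non-cache-coherent stream usable in both directions.
/// let stream: DmaStream<FromAndToDevice> = DmaStream::alloc(1, false)?;
///
/// // Write from the CPU side, then make the data visible to the device.
/// stream.writer()?.fill_zeros(stream.size());
/// stream.sync_to_device(0..stream.size())?;
///
/// // Hand `stream.daddr()` to the device for its DMA transfers.
/// ```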
#[derive(Debug)]
pub struct DmaStream<D: DmaDirection = FromAndToDevice> {
    inner: Inner,
    map_daddr: Option<Daddr>,
    is_cache_coherent: bool,
    _phantom: PhantomData<D>,
}

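/// The memory that backs a [`DmaStream`].
///
/// `Segment` maps the allocated frames directly. `Kva` backs the stream with
/// a dedicated kernel virtual area. `Both` keeps a caller-provided segment
/// for CPU access together with a kernel-virtual-area bounce buffer that the
/// device accesses; the two are kept consistent by copying during
/// synchronization.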
#[derive(Debug)]
enum Inner {
    Segment(USegment),
    Kva(KVirtArea, Paddr),
    Both(KVirtArea, Paddr, USegment),
}

impl<D: DmaDirection> DmaStream<D> {
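    /// Allocates a zero-initialized `DmaStream` spanning `nframes` frames.
    ///
    /// This is only available for directions that allow writing to the
    /// device, since zeroing the buffer requires CPU write access.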
    pub fn alloc(nframes: usize, is_cache_coherent: bool) -> Result<Self, Error> {
        const { assert!(D::CAN_WRITE_TO_DEVICE) };

        Self::alloc_uninit(nframes, is_cache_coherent).and_then(|dma| {
            dma.writer()?.fill_zeros(dma.size());
            Ok(dma)
        })
    }

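    /// Allocates a `DmaStream` spanning `nframes` frames without initializing
    /// its contents.
    ///
    /// If DMA synchronization is possible (or the mapping is cache coherent)
    /// and no confidential-VM protection is required, the allocated frames
    /// are used directly; otherwise the stream is backed by a dedicated
    /// kernel virtual area.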
    pub fn alloc_uninit(nframes: usize, is_cache_coherent: bool) -> Result<Self, Error> {
        let cvm = cvm_need_private_protection();

        let (inner, paddr_range) = if (can_sync_dma() || is_cache_coherent) && !cvm {
            let segment: USegment = FrameAllocOptions::new()
                .zeroed(false)
                .alloc_segment(nframes)?
                .into();
            let paddr_range = segment.paddr_range();

            (Inner::Segment(segment), paddr_range)
        } else {
            let (kva, paddr) = alloc_kva(nframes, can_sync_dma() || is_cache_coherent)?;

            (Inner::Kva(kva, paddr), paddr..paddr + nframes * PAGE_SIZE)
        };

        let map_daddr = unsafe { prepare_dma(&paddr_range) };

        Ok(Self {
            inner,
            map_daddr,
            is_cache_coherent,
            _phantom: PhantomData,
        })
    }

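    /// Creates a `DmaStream` that maps an existing segment for DMA.
    ///
    /// If the segment cannot be handed to the device directly (e.g., under
    /// confidential-VM protection), a kernel-virtual-area bounce buffer is
    /// allocated instead, and data is moved between the segment and the
    /// bounce buffer during explicit synchronization.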
    pub fn map(segment: USegment, is_cache_coherent: bool) -> Result<Self, Error> {
        let cvm = cvm_need_private_protection();
        let size = segment.size();

        let (inner, paddr) = if (can_sync_dma() || is_cache_coherent) && !cvm {
            let paddr = segment.paddr();

            (Inner::Segment(segment), paddr)
        } else {
            let (kva, paddr) = alloc_kva(size / PAGE_SIZE, is_cache_coherent)?;

            (Inner::Both(kva, paddr, segment), paddr)
        };

        let paddr_range = paddr..paddr + size;

        let map_daddr = unsafe { prepare_dma(&paddr_range) };

        Ok(Self {
            inner,
            map_daddr,
            is_cache_coherent,
            _phantom: PhantomData,
        })
    }

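    /// Synchronizes `byte_range` after the device has written to it, so that
    /// subsequent CPU reads observe the data produced by the device.
    ///
    /// This is only available for directions that allow reading from the
    /// device.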
    pub fn sync_from_device(&self, byte_range: Range<usize>) -> Result<(), Error> {
        const { assert!(D::CAN_READ_FROM_DEVICE) };

        self.sync_impl(byte_range, true)
    }

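    /// Synchronizes `byte_range` after the CPU has written to it, so that
    /// subsequent device reads observe the data produced by the CPU.
    ///
    /// This is only available for directions that allow writing to the
    /// device.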
    pub fn sync_to_device(&self, byte_range: Range<usize>) -> Result<(), Error> {
        const { assert!(D::CAN_WRITE_TO_DEVICE) };

        self.sync_impl(byte_range, false)
    }

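    /// Validates `byte_range` and performs the synchronization appropriate
    /// for the backing memory: a no-op for cache-coherent mappings, an
    /// architectural cache maintenance operation where DMA can be synced
    /// directly, or a copy through the bounce buffer for `Inner::Both`.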
    fn sync_impl(&self, byte_range: Range<usize>, is_from_device: bool) -> Result<(), Error> {
        let size = self.size();
        if byte_range.end > size || byte_range.start > size {
            return Err(Error::InvalidArgs);
        }
        if self.is_cache_coherent {
            return Ok(());
        }

        let va_range = match &self.inner {
            Inner::Segment(segment) => {
                let pa_range = segment.paddr_range();
                paddr_to_vaddr(pa_range.start)..paddr_to_vaddr(pa_range.end)
            }
            Inner::Kva(kva, _) => {
                if !can_sync_dma() {
                    return Ok(());
                }
                kva.range()
            }
            Inner::Both(kva, _, seg) => {
                self.sync_via_copying(byte_range, is_from_device, seg, kva);
                return Ok(());
            }
        };
        let range = va_range.start + byte_range.start..va_range.start + byte_range.end;

        unsafe { crate::arch::mm::sync_dma_range::<D>(range) };

        Ok(())
    }

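    /// Synchronizes the `Inner::Both` case by copying `byte_range` between
    /// the caller-provided segment and the bounce buffer, in the direction
    /// indicated by `is_from_device`.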
    fn sync_via_copying(
        &self,
        byte_range: Range<usize>,
        is_from_device: bool,
        seg: &USegment,
        kva: &KVirtArea,
    ) {
        let skip = byte_range.start;
        let limit = byte_range.len();

        let (mut reader, mut writer) = if is_from_device {
            let kva_reader =
                unsafe { VmReader::from_kernel_space(kva.start() as *const u8, kva.size()) };

            (kva_reader, seg.writer())
        } else {
            let kva_writer =
                unsafe { VmWriter::from_kernel_space(kva.start() as *mut u8, kva.size()) };

            (seg.reader(), kva_writer)
        };

        writer
            .skip(skip)
            .limit(limit)
            .write(reader.skip(skip).limit(limit));
    }
}

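// Splitting a `DmaStream` at a page-aligned offset yields two streams that
// together cover the original mapping; the backing memory and the mapped
// device address are split at the same offset.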
impl<D: DmaDirection> Split for DmaStream<D> {
    fn split(self, offset: usize) -> (Self, Self) {
        assert!(offset.is_multiple_of(PAGE_SIZE));
        assert!(0 < offset && offset < self.size());

        let (inner, map_daddr, is_cache_coherent) = {
            let this = ManuallyDrop::new(self);
            (
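                // SAFETY: `self` is wrapped in `ManuallyDrop`, so `inner` is
                // moved out exactly once here and will not be dropped again.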
                unsafe { core::ptr::read(&this.inner as *const Inner) },
                this.map_daddr,
                this.is_cache_coherent,
            )
        };

        let (inner1, inner2) = match inner {
            Inner::Segment(segment) => {
                let (s1, s2) = segment.split(offset);
                (Inner::Segment(s1), Inner::Segment(s2))
            }
            Inner::Kva(kva, paddr) => {
                let (kva1, kva2) = kva.split(offset);
                let (paddr1, paddr2) = (paddr, paddr + offset);
                (Inner::Kva(kva1, paddr1), Inner::Kva(kva2, paddr2))
            }
            Inner::Both(kva, paddr, segment) => {
                let (kva1, kva2) = kva.split(offset);
                let (paddr1, paddr2) = (paddr, paddr + offset);
                let (s1, s2) = segment.split(offset);
                (Inner::Both(kva1, paddr1, s1), Inner::Both(kva2, paddr2, s2))
            }
        };

        let (daddr1, daddr2) = split_daddr(map_daddr, offset);

        (
            Self {
                inner: inner1,
                map_daddr: daddr1,
                is_cache_coherent,
                _phantom: PhantomData,
            },
            Self {
                inner: inner2,
                map_daddr: daddr2,
                is_cache_coherent,
                _phantom: PhantomData,
            },
        )
    }
}

impl<D: DmaDirection> Drop for DmaStream<D> {
    fn drop(&mut self) {
        unsafe { unprepare_dma(&self.paddr_range(), self.map_daddr) };
    }
}

impl<D: DmaDirection> HasPaddr for DmaStream<D> {
    fn paddr(&self) -> Paddr {
        match &self.inner {
            Inner::Segment(segment) => segment.paddr(),
            Inner::Kva(_, paddr) | Inner::Both(_, paddr, _) => *paddr,
        }
    }
}

impl<D: DmaDirection> HasDaddr for DmaStream<D> {
    fn daddr(&self) -> Daddr {
        self.map_daddr.unwrap_or_else(|| self.paddr() as Daddr)
    }
}

impl<D: DmaDirection> HasSize for DmaStream<D> {
    fn size(&self) -> usize {
        match &self.inner {
            Inner::Segment(segment) => segment.size(),
            Inner::Kva(kva, _) => kva.size(),
            Inner::Both(kva, _, segment) => {
                debug_assert_eq!(kva.size(), segment.size());
                kva.size()
            }
        }
    }
}

impl<D: DmaDirection> HasVmReaderWriter for DmaStream<D> {
    type Types = VmReaderWriterResult;

    fn reader(&self) -> Result<VmReader<'_, Infallible>, Error> {
        if !D::CAN_READ_FROM_DEVICE {
            return Err(Error::AccessDenied);
        }
        match &self.inner {
            Inner::Segment(seg) | Inner::Both(_, _, seg) => Ok(seg.reader()),
            Inner::Kva(kva, _) => {
                unsafe {
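                    // SAFETY: the backing kernel virtual area is owned by
                    // `self` and remains mapped for the returned reader's
                    // lifetime, which borrows `self`.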
                    Ok(VmReader::from_kernel_space(
                        kva.start() as *const u8,
                        kva.size(),
                    ))
                }
            }
        }
    }

    fn writer(&self) -> Result<VmWriter<'_, Infallible>, Error> {
        if !D::CAN_WRITE_TO_DEVICE {
            return Err(Error::AccessDenied);
        }
        match &self.inner {
            Inner::Segment(seg) | Inner::Both(_, _, seg) => Ok(seg.writer()),
            Inner::Kva(kva, _) => {
                unsafe {
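                    // SAFETY: the backing kernel virtual area is owned by
                    // `self` and remains mapped for the returned writer's
                    // lifetime, which borrows `self`.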
                    Ok(VmWriter::from_kernel_space(
                        kva.start() as *mut u8,
                        kva.size(),
                    ))
                }
            }
        }
    }
}