1use core::{fmt::Debug, marker::PhantomData, mem::ManuallyDrop, ops::Range};
4
5use super::util::{
6 alloc_kva, cvm_need_private_protection, prepare_dma, split_daddr, unprepare_dma,
7};
8use crate::{
9 arch::{irq, mm::can_sync_dma},
10 error::Error,
11 mm::{
12 Daddr, FrameAllocOptions, HasDaddr, HasPaddr, HasPaddrRange, HasSize, Infallible,
13 PAGE_SIZE, Paddr, Split, USegment, VmReader, VmWriter,
14 io::util::{HasVmReaderWriter, VmReaderWriterResult},
15 kspace::kvirt_area::KVirtArea,
16 paddr_to_vaddr,
17 },
18};
19
/// A type-level tag describing the direction of DMA data transfer.
///
/// Implementors are uninhabited marker types. The trait is sealed (via
/// [`private::Sealed`]), so only the three directions defined in this module
/// can implement it.
pub trait DmaDirection: 'static + Debug + private::Sealed {
    /// Whether data may be transferred from the device to memory.
    ///
    /// Gates `DmaStream::sync_from_device` and `reader`.
    const CAN_READ_FROM_DEVICE: bool;
    /// Whether data may be transferred from memory to the device.
    ///
    /// Gates `DmaStream::alloc`, `sync_to_device`, and `writer`.
    const CAN_WRITE_TO_DEVICE: bool;
}
28
mod private {
    /// Seals [`super::DmaDirection`] so it cannot be implemented outside
    /// this module.
    pub trait Sealed {}
}
35
/// Marker type for DMA transfers that only move data from memory to the
/// device (e.g. a transmit buffer).
///
/// This is an uninhabited enum: it can never be instantiated and exists
/// purely as a type parameter.
#[derive(Debug)]
pub enum ToDevice {}

impl private::Sealed for ToDevice {}
impl DmaDirection for ToDevice {
    const CAN_READ_FROM_DEVICE: bool = false;
    const CAN_WRITE_TO_DEVICE: bool = true;
}
47
/// Marker type for DMA transfers that only move data from the device to
/// memory (e.g. a receive buffer).
///
/// This is an uninhabited enum: it can never be instantiated and exists
/// purely as a type parameter.
#[derive(Debug)]
pub enum FromDevice {}

impl private::Sealed for FromDevice {}
impl DmaDirection for FromDevice {
    const CAN_READ_FROM_DEVICE: bool = true;
    const CAN_WRITE_TO_DEVICE: bool = false;
}
59
/// Marker type for bidirectional DMA transfers.
///
/// This is the default direction of [`DmaStream`]. It is an uninhabited
/// enum used purely as a type parameter.
#[derive(Debug)]
pub enum FromAndToDevice {}

impl private::Sealed for FromAndToDevice {}
impl DmaDirection for FromAndToDevice {
    const CAN_READ_FROM_DEVICE: bool = true;
    const CAN_WRITE_TO_DEVICE: bool = true;
}
69
/// A DMA buffer for streaming transfers, parameterized by the transfer
/// direction `D` (defaults to [`FromAndToDevice`]).
///
/// Unless the mapping is cache coherent, CPU and device views must be
/// reconciled explicitly via `sync_from_device` / `sync_to_device`.
#[derive(Debug)]
pub struct DmaStream<D: DmaDirection = FromAndToDevice> {
    // The memory backing this stream; see `Inner` for the variants.
    inner: Inner,
    // Device address recorded by `prepare_dma`, if one was established.
    // `daddr()` falls back to the physical address when this is `None`.
    map_daddr: Option<Daddr>,
    // Whether CPU caches and device accesses are coherent. When true, the
    // sync operations are no-ops.
    is_cache_coherent: bool,
    // Carries the direction type parameter without storing any data.
    _phantom: PhantomData<D>,
}
87
/// The memory backing a [`DmaStream`].
#[derive(Debug)]
enum Inner {
    // A physically contiguous segment, accessed by the CPU through the
    // linear mapping.
    Segment(USegment),
    // A dedicated kernel virtual area together with the physical address
    // backing it.
    Kva(KVirtArea, Paddr),
    // A kernel virtual area (with its backing physical address) shadowing a
    // caller-provided segment; syncs copy data between the two
    // (see `sync_via_copying`).
    Both(KVirtArea, Paddr, USegment),
}
94
impl<D: DmaDirection> DmaStream<D> {
    /// Allocates a zero-filled DMA stream of `nframes` page frames.
    ///
    /// Only available for directions where the CPU may write to the buffer
    /// (checked at compile time), since zero-filling requires a writer.
    ///
    /// # Errors
    ///
    /// Returns an error if the underlying allocation or DMA preparation
    /// fails.
    pub fn alloc(nframes: usize, is_cache_coherent: bool) -> Result<Self, Error> {
        const { assert!(D::CAN_WRITE_TO_DEVICE) };

        Self::alloc_uninit(nframes, is_cache_coherent).and_then(|dma| {
            dma.writer()?.fill_zeros(dma.size());
            Ok(dma)
        })
    }

    /// Allocates a DMA stream of `nframes` page frames without initializing
    /// its contents.
    ///
    /// When DMA cache synchronization is available (or the mapping is cache
    /// coherent) and the platform is not a confidential VM requiring private
    /// memory protection, the stream is backed directly by a plain segment.
    /// Otherwise a dedicated kernel virtual area is allocated instead.
    pub fn alloc_uninit(nframes: usize, is_cache_coherent: bool) -> Result<Self, Error> {
        // The DMA preparation below must not run with IRQs disabled.
        debug_assert!(irq::is_local_enabled());

        let cvm = cvm_need_private_protection();

        let (inner, paddr_range) = if (can_sync_dma() || is_cache_coherent) && !cvm {
            // `zeroed(false)`: the contents are deliberately left
            // uninitialized; `alloc` zero-fills when required.
            let segment: USegment = FrameAllocOptions::new()
                .zeroed(false)
                .alloc_segment(nframes)?
                .into();
            let paddr_range = segment.paddr_range();

            (Inner::Segment(segment), paddr_range)
        } else {
            let (kva, paddr) = alloc_kva(nframes, can_sync_dma() || is_cache_coherent)?;

            (Inner::Kva(kva, paddr), paddr..paddr + nframes * PAGE_SIZE)
        };

        // SAFETY: `paddr_range` covers frames exclusively owned by `inner`,
        // which lives as long as the returned stream; the matching
        // `unprepare_dma` call happens in `Drop`.
        let map_daddr = unsafe { prepare_dma(&paddr_range) };

        Ok(Self {
            inner,
            map_daddr,
            is_cache_coherent,
            _phantom: PhantomData,
        })
    }

    /// Maps an existing segment for streaming DMA.
    ///
    /// When the segment cannot be used for DMA directly, a shadow kernel
    /// virtual area is allocated and syncs copy data between the two.
    ///
    /// NOTE(review): the fallback branch passes only `is_cache_coherent` to
    /// `alloc_kva`, whereas `alloc_uninit` passes
    /// `can_sync_dma() || is_cache_coherent` — confirm this asymmetry is
    /// intentional.
    pub fn map(segment: USegment, is_cache_coherent: bool) -> Result<Self, Error> {
        // The DMA preparation below must not run with IRQs disabled.
        debug_assert!(irq::is_local_enabled());

        let cvm = cvm_need_private_protection();
        let size = segment.size();

        let (inner, paddr) = if (can_sync_dma() || is_cache_coherent) && !cvm {
            // The segment can be used for DMA directly.
            let paddr = segment.paddr();

            (Inner::Segment(segment), paddr)
        } else {
            // Shadow the segment with a dedicated KVA; see
            // `sync_via_copying` for how the two are kept consistent.
            let (kva, paddr) = alloc_kva(size / PAGE_SIZE, is_cache_coherent)?;

            (Inner::Both(kva, paddr, segment), paddr)
        };

        let paddr_range = paddr..paddr + size;

        // SAFETY: `paddr_range` covers memory owned by `inner` for the
        // stream's whole lifetime; the matching `unprepare_dma` call
        // happens in `Drop`.
        let map_daddr = unsafe { prepare_dma(&paddr_range) };

        Ok(Self {
            inner,
            map_daddr,
            is_cache_coherent,
            _phantom: PhantomData,
        })
    }

    /// Synchronizes `byte_range` after the device has written to it, so that
    /// subsequent CPU reads observe the device's data.
    ///
    /// Only available for directions where data may flow from the device
    /// (checked at compile time).
    ///
    /// # Errors
    ///
    /// Returns `Error::InvalidArgs` if `byte_range` exceeds the stream size.
    pub fn sync_from_device(&self, byte_range: Range<usize>) -> Result<(), Error> {
        const { assert!(D::CAN_READ_FROM_DEVICE) };

        self.sync_impl(byte_range, true)
    }

    /// Synchronizes `byte_range` before the device reads it, so that the
    /// device observes the CPU's latest writes.
    ///
    /// Only available for directions where data may flow to the device
    /// (checked at compile time).
    ///
    /// # Errors
    ///
    /// Returns `Error::InvalidArgs` if `byte_range` exceeds the stream size.
    pub fn sync_to_device(&self, byte_range: Range<usize>) -> Result<(), Error> {
        const { assert!(D::CAN_WRITE_TO_DEVICE) };

        self.sync_impl(byte_range, false)
    }

    /// Common implementation of both synchronization directions.
    fn sync_impl(&self, byte_range: Range<usize>, is_from_device: bool) -> Result<(), Error> {
        let size = self.size();
        if byte_range.end > size || byte_range.start > size {
            return Err(Error::InvalidArgs);
        }
        // Coherent mappings require no explicit synchronization.
        if self.is_cache_coherent {
            return Ok(());
        }

        let va_range = match &self.inner {
            Inner::Segment(segment) => {
                // Synchronize through the linear mapping of the segment.
                let pa_range = segment.paddr_range();
                paddr_to_vaddr(pa_range.start)..paddr_to_vaddr(pa_range.end)
            }
            Inner::Kva(kva, _) => {
                // Without cache-sync support, the KVA was presumably mapped
                // with attributes that make explicit syncing unnecessary
                // (see the flag passed to `alloc_kva` in `alloc_uninit`) —
                // nothing to do here.
                if !can_sync_dma() {
                    return Ok(());
                }
                kva.range()
            }
            Inner::Both(kva, _, seg) => {
                // The KVA shadows the segment; synchronize by copying.
                self.sync_via_copying(byte_range, is_from_device, seg, kva);
                return Ok(());
            }
        };
        let range = va_range.start + byte_range.start..va_range.start + byte_range.end;

        // SAFETY: `range` lies within memory owned by `self.inner`, which
        // stays mapped for the stream's whole lifetime.
        unsafe { crate::arch::mm::sync_dma_range::<D>(range) };

        Ok(())
    }

    /// Synchronizes a `Both`-backed stream by copying `byte_range` between
    /// the shadow KVA and the segment, in the direction indicated by
    /// `is_from_device`.
    fn sync_via_copying(
        &self,
        byte_range: Range<usize>,
        is_from_device: bool,
        seg: &USegment,
        kva: &KVirtArea,
    ) {
        let skip = byte_range.start;
        let limit = byte_range.len();

        let (mut reader, mut writer) = if is_from_device {
            // Device data landed in the KVA; copy it into the segment.
            // SAFETY: the KVA is owned by `self` and stays mapped while the
            // reader (which borrows `self`) is alive, so `kva.size()` bytes
            // at `kva.start()` remain valid for reads.
            let kva_reader =
                unsafe { VmReader::from_kernel_space(kva.start() as *const u8, kva.size()) };

            (kva_reader, seg.writer())
        } else {
            // CPU data lives in the segment; copy it into the KVA so the
            // device can read it.
            // SAFETY: as above — the KVA is owned by `self` and outlives the
            // writer borrowing `self`, so the range is valid for writes.
            let kva_writer =
                unsafe { VmWriter::from_kernel_space(kva.start() as *mut u8, kva.size()) };

            (seg.reader(), kva_writer)
        };

        // Restrict both sides to the requested window before copying.
        writer
            .skip(skip)
            .limit(limit)
            .write(reader.skip(skip).limit(limit));
    }
}
304
impl<D: DmaDirection> Split for DmaStream<D> {
    /// Splits the stream into two independent streams at `offset` bytes.
    ///
    /// # Panics
    ///
    /// Panics if `offset` is not a multiple of `PAGE_SIZE`, or is not
    /// strictly between zero and the stream's size.
    fn split(self, offset: usize) -> (Self, Self) {
        assert!(offset.is_multiple_of(PAGE_SIZE));
        assert!(0 < offset && offset < self.size());

        // Move the fields out of `self` without running `Drop`, which would
        // otherwise unprepare the DMA mapping that both halves continue to
        // use.
        let (inner, map_daddr, is_cache_coherent) = {
            let this = ManuallyDrop::new(self);
            (
                // SAFETY: `this` is wrapped in `ManuallyDrop` and is never
                // dropped or accessed again, so `inner` is moved out exactly
                // once and cannot be double-dropped.
                unsafe { core::ptr::read(&this.inner as *const Inner) },
                this.map_daddr,
                this.is_cache_coherent,
            )
        };

        // Split the backing memory; each variant splits every component it
        // carries at the same byte offset.
        let (inner1, inner2) = match inner {
            Inner::Segment(segment) => {
                let (s1, s2) = segment.split(offset);
                (Inner::Segment(s1), Inner::Segment(s2))
            }
            Inner::Kva(kva, paddr) => {
                let (kva1, kva2) = kva.split(offset);
                let (paddr1, paddr2) = (paddr, paddr + offset);
                (Inner::Kva(kva1, paddr1), Inner::Kva(kva2, paddr2))
            }
            Inner::Both(kva, paddr, segment) => {
                let (kva1, kva2) = kva.split(offset);
                let (paddr1, paddr2) = (paddr, paddr + offset);
                let (s1, s2) = segment.split(offset);
                (Inner::Both(kva1, paddr1, s1), Inner::Both(kva2, paddr2, s2))
            }
        };

        // Split the recorded device address (if any) at the same offset.
        let (daddr1, daddr2) = split_daddr(map_daddr, offset);

        (
            Self {
                inner: inner1,
                map_daddr: daddr1,
                is_cache_coherent,
                _phantom: PhantomData,
            },
            Self {
                inner: inner2,
                map_daddr: daddr2,
                is_cache_coherent,
                _phantom: PhantomData,
            },
        )
    }
}
356
impl<D: DmaDirection> Drop for DmaStream<D> {
    fn drop(&mut self) {
        // SAFETY: the physical range and device address were produced by the
        // matching `prepare_dma` call at construction, and the stream (hence
        // its mapping) is not used after this point.
        unsafe { unprepare_dma(&self.paddr_range(), self.map_daddr) };
    }
}
363
364impl<D: DmaDirection> HasPaddr for DmaStream<D> {
365 fn paddr(&self) -> Paddr {
366 match &self.inner {
367 Inner::Segment(segment) => segment.paddr(),
368 Inner::Kva(_, paddr) | Inner::Both(_, paddr, _) => *paddr, }
370 }
371}
372
373impl<D: DmaDirection> HasDaddr for DmaStream<D> {
374 fn daddr(&self) -> Daddr {
375 self.map_daddr.unwrap_or_else(|| self.paddr() as Daddr)
376 }
377}
378
379impl<D: DmaDirection> HasSize for DmaStream<D> {
380 fn size(&self) -> usize {
381 match &self.inner {
382 Inner::Segment(segment) => segment.size(),
383 Inner::Kva(kva, _) => kva.size(),
384 Inner::Both(kva, _, segment) => {
385 debug_assert_eq!(kva.size(), segment.size());
386 kva.size()
387 }
388 }
389 }
390}
391
impl<D: DmaDirection> HasVmReaderWriter for DmaStream<D> {
    type Types = VmReaderWriterResult;

    /// Returns a reader over the buffer contents.
    ///
    /// # Errors
    ///
    /// Returns `Error::AccessDenied` if the direction does not permit data
    /// transfers from the device.
    fn reader(&self) -> Result<VmReader<'_, Infallible>, Error> {
        if !D::CAN_READ_FROM_DEVICE {
            return Err(Error::AccessDenied);
        }
        match &self.inner {
            // For `Both`, the segment is the CPU-facing side of the buffer.
            Inner::Segment(seg) | Inner::Both(_, _, seg) => Ok(seg.reader()),
            Inner::Kva(kva, _) => {
                // SAFETY: the KVA is owned by `self` and stays mapped while
                // the returned reader (which borrows `self`) is alive, so
                // `kva.size()` bytes at `kva.start()` remain valid for
                // reads.
                unsafe {
                    Ok(VmReader::from_kernel_space(
                        kva.start() as *const u8,
                        kva.size(),
                    ))
                }
            }
        }
    }

    /// Returns a writer over the buffer contents.
    ///
    /// # Errors
    ///
    /// Returns `Error::AccessDenied` if the direction does not permit data
    /// transfers to the device.
    fn writer(&self) -> Result<VmWriter<'_, Infallible>, Error> {
        if !D::CAN_WRITE_TO_DEVICE {
            return Err(Error::AccessDenied);
        }
        match &self.inner {
            // For `Both`, the segment is the CPU-facing side of the buffer.
            Inner::Segment(seg) | Inner::Both(_, _, seg) => Ok(seg.writer()),
            Inner::Kva(kva, _) => {
                // SAFETY: the KVA is owned by `self` and stays mapped while
                // the returned writer (which borrows `self`) is alive, so
                // `kva.size()` bytes at `kva.start()` remain valid for
                // writes.
                unsafe {
                    Ok(VmWriter::from_kernel_space(
                        kva.start() as *mut u8,
                        kva.size(),
                    ))
                }
            }
        }
    }
}