1use core::{fmt::Debug, marker::PhantomData, mem::ManuallyDrop, ops::Range};
4
5use super::util::{
6 alloc_kva, cvm_need_private_protection, prepare_dma, split_daddr, unprepare_dma,
7};
8use crate::{
9 arch::mm::can_sync_dma,
10 error::Error,
11 mm::{
12 Daddr, FrameAllocOptions, HasDaddr, HasPaddr, HasPaddrRange, HasSize, Infallible,
13 PAGE_SIZE, Paddr, Split, USegment, VmReader, VmWriter,
14 io_util::{HasVmReaderWriter, VmReaderWriterResult},
15 kspace::kvirt_area::KVirtArea,
16 paddr_to_vaddr,
17 },
18};
19
/// The direction of data transfer for a DMA mapping.
///
/// This trait is sealed; the only implementors are the three marker types
/// in this module: [`ToDevice`], [`FromDevice`], and [`FromAndToDevice`].
/// The two constants gate, at compile time, which CPU-side operations
/// (reading/writing the buffer, syncing to/from the device) are available
/// on a [`DmaStream`].
pub trait DmaDirection: 'static + Debug + private::Sealed {
    /// Whether the CPU is allowed to read data written by the device.
    const CAN_READ_FROM_DEVICE: bool;
    /// Whether the CPU is allowed to write data for the device to read.
    const CAN_WRITE_TO_DEVICE: bool;
}
28
mod private {
    /// Seals [`super::DmaDirection`] so that no implementations can be
    /// added outside this module.
    pub trait Sealed {}
}
35
/// A DMA direction marker: data flows from the CPU to the device
/// (e.g., a transmit buffer). Uninhabited; used only at the type level.
#[derive(Debug)]
pub enum ToDevice {}

impl private::Sealed for ToDevice {}
impl DmaDirection for ToDevice {
    const CAN_READ_FROM_DEVICE: bool = false;
    const CAN_WRITE_TO_DEVICE: bool = true;
}
47
/// A DMA direction marker: data flows from the device to the CPU
/// (e.g., a receive buffer). Uninhabited; used only at the type level.
#[derive(Debug)]
pub enum FromDevice {}

impl private::Sealed for FromDevice {}
impl DmaDirection for FromDevice {
    const CAN_READ_FROM_DEVICE: bool = true;
    const CAN_WRITE_TO_DEVICE: bool = false;
}
59
/// A DMA direction marker: data may flow in both directions between the
/// CPU and the device. Uninhabited; used only at the type level.
#[derive(Debug)]
pub enum FromAndToDevice {}

impl private::Sealed for FromAndToDevice {}
impl DmaDirection for FromAndToDevice {
    const CAN_READ_FROM_DEVICE: bool = true;
    const CAN_WRITE_TO_DEVICE: bool = true;
}
69
/// A streaming DMA mapping over a range of physical memory.
///
/// The direction parameter `D` (defaulting to bidirectional) statically
/// restricts which CPU-side accessors and sync operations can be used.
/// Dropping the stream tears down the device-visible mapping via
/// `unprepare_dma`.
#[derive(Debug)]
pub struct DmaStream<D: DmaDirection = FromAndToDevice> {
    // The backing memory: a linearly-mapped segment, a dedicated kernel
    // virtual area, or both (bounce-buffer configuration).
    inner: Inner,
    // The device address produced by `prepare_dma`; `None` means the
    // device uses the physical address directly (see `HasDaddr`).
    map_daddr: Option<Daddr>,
    // Whether the memory is cache-coherent with the device, making
    // explicit cache synchronization unnecessary.
    is_cache_coherent: bool,
    _phantom: PhantomData<D>,
}
84
/// The memory backing a [`DmaStream`].
#[derive(Debug)]
enum Inner {
    /// An untyped segment accessed by the CPU through the linear mapping;
    /// the device accesses the segment's own frames.
    Segment(USegment),
    /// A dedicated kernel virtual area whose backing frames start at the
    /// given physical address; both CPU and device use those frames.
    Kva(KVirtArea, Paddr),
    /// A bounce-buffer configuration: the device accesses the KVA's frames
    /// (starting at the given physical address) while CPU reads/writes go
    /// to the segment; syncing copies between the two.
    Both(KVirtArea, Paddr, USegment),
}
91
impl<D: DmaDirection> DmaStream<D> {
    /// Allocates a zero-initialized DMA stream of `nframes` page frames.
    ///
    /// Available only for directions where the CPU may write the buffer
    /// (`D::CAN_WRITE_TO_DEVICE`), since zero-filling is a CPU write.
    ///
    /// # Errors
    ///
    /// Returns an error if frame allocation or DMA preparation fails.
    pub fn alloc(nframes: usize, is_cache_coherent: bool) -> Result<Self, Error> {
        const { assert!(D::CAN_WRITE_TO_DEVICE) };

        Self::alloc_uninit(nframes, is_cache_coherent).and_then(|dma| {
            // Zero the frames so stale memory contents cannot leak to the
            // device (or back to a later reader).
            dma.writer()?.fill_zeros(dma.size());
            Ok(dma)
        })
    }

    /// Allocates a DMA stream of `nframes` page frames whose contents are
    /// left uninitialized (i.e., possibly stale).
    pub fn alloc_uninit(nframes: usize, is_cache_coherent: bool) -> Result<Self, Error> {
        // Confidential VMs must not expose private memory to devices
        // directly, so they always take the dedicated-KVA path below.
        let cvm = cvm_need_private_protection();

        let (inner, paddr_range) = if (can_sync_dma() || is_cache_coherent) && !cvm {
            // Fast path: a linearly-mapped segment can be handed to the
            // device directly, because the mapping is cache-coherent or
            // caches can be explicitly synchronized over it.
            let segment: USegment = FrameAllocOptions::new()
                .zeroed(false)
                .alloc_segment(nframes)?
                .into();
            let paddr_range = segment.paddr_range();

            (Inner::Segment(segment), paddr_range)
        } else {
            // Otherwise map a dedicated kernel virtual area; the second
            // argument selects a cached mapping only when cache syncing is
            // possible (or the platform is coherent anyway).
            let (kva, paddr) = alloc_kva(nframes, can_sync_dma() || is_cache_coherent)?;

            (Inner::Kva(kva, paddr), paddr..paddr + nframes * PAGE_SIZE)
        };

        // SAFETY: `paddr_range` covers memory exclusively owned by `inner`,
        // which outlives the mapping; the matching `unprepare_dma` runs in
        // `Drop` — TODO confirm against `prepare_dma`'s documented contract.
        let map_daddr = unsafe { prepare_dma(&paddr_range) };

        Ok(Self {
            inner,
            map_daddr,
            is_cache_coherent,
            _phantom: PhantomData,
        })
    }

    /// Wraps an existing segment as a DMA stream.
    ///
    /// If the segment cannot be exposed to the device directly (no way to
    /// sync caches, or a confidential VM must keep it private), a bounce
    /// buffer in a dedicated KVA is allocated instead, and sync operations
    /// copy data between the segment and the bounce buffer.
    pub fn map(segment: USegment, is_cache_coherent: bool) -> Result<Self, Error> {
        let cvm = cvm_need_private_protection();
        let size = segment.size();

        let (inner, paddr) = if (can_sync_dma() || is_cache_coherent) && !cvm {
            let paddr = segment.paddr();

            (Inner::Segment(segment), paddr)
        } else {
            // NOTE(review): unlike `alloc_uninit`, the cache attribute here
            // is `is_cache_coherent` alone (no `can_sync_dma()`) —
            // presumably because `Both` syncs by copying rather than by
            // cache maintenance; confirm this asymmetry is intentional.
            let (kva, paddr) = alloc_kva(size / PAGE_SIZE, is_cache_coherent)?;

            (Inner::Both(kva, paddr, segment), paddr)
        };

        // The device-visible range is the KVA's frames for `Both`, or the
        // segment's own frames otherwise.
        let paddr_range = paddr..paddr + size;

        // SAFETY: `paddr_range` covers memory exclusively owned by `inner`,
        // which outlives the mapping; the matching `unprepare_dma` runs in
        // `Drop` — TODO confirm against `prepare_dma`'s documented contract.
        let map_daddr = unsafe { prepare_dma(&paddr_range) };

        Ok(Self {
            inner,
            map_daddr,
            is_cache_coherent,
            _phantom: PhantomData,
        })
    }

    /// Makes device-written data in `byte_range` visible to the CPU.
    ///
    /// Compile-time restricted to directions that allow reading from the
    /// device.
    pub fn sync_from_device(&self, byte_range: Range<usize>) -> Result<(), Error> {
        const { assert!(D::CAN_READ_FROM_DEVICE) };

        self.sync_impl(byte_range, true)
    }

    /// Makes CPU-written data in `byte_range` visible to the device.
    ///
    /// Compile-time restricted to directions that allow writing to the
    /// device.
    pub fn sync_to_device(&self, byte_range: Range<usize>) -> Result<(), Error> {
        const { assert!(D::CAN_WRITE_TO_DEVICE) };

        self.sync_impl(byte_range, false)
    }

    /// Shared implementation of the two sync directions.
    ///
    /// Returns `Error::InvalidArgs` if `byte_range` exceeds the stream
    /// size; otherwise performs whatever synchronization the backing
    /// memory requires (possibly nothing).
    fn sync_impl(&self, byte_range: Range<usize>, is_from_device: bool) -> Result<(), Error> {
        let size = self.size();
        if byte_range.end > size || byte_range.start > size {
            return Err(Error::InvalidArgs);
        }
        // Coherent mappings need no explicit synchronization.
        if self.is_cache_coherent {
            return Ok(());
        }

        let va_range = match &self.inner {
            Inner::Segment(segment) => {
                // Sync through the linear mapping of the segment's frames.
                let pa_range = segment.paddr_range();
                paddr_to_vaddr(pa_range.start)..paddr_to_vaddr(pa_range.end)
            }
            Inner::Kva(kva, _) => {
                if !can_sync_dma() {
                    // Without cache-sync support the KVA was mapped with a
                    // DMA-safe (uncached) attribute, so no maintenance is
                    // needed — presumably; see `alloc_uninit`'s KVA path.
                    return Ok(());
                }
                kva.range()
            }
            Inner::Both(kva, _, seg) => {
                // Bounce-buffer configuration: sync by copying between the
                // CPU-side segment and the device-side KVA.
                self.sync_via_copying(byte_range, is_from_device, seg, kva);
                return Ok(());
            }
        };
        // Narrow the virtual range to the requested byte range.
        let range = va_range.start + byte_range.start..va_range.start + byte_range.end;

        // SAFETY: `range` lies within a valid kernel mapping owned by
        // `self.inner`, as established above.
        unsafe { crate::arch::mm::sync_dma_range::<D>(range) };

        Ok(())
    }

    /// Synchronizes the `Both` variant by copying `byte_range` between the
    /// CPU-side segment and the device-side KVA bounce buffer.
    fn sync_via_copying(
        &self,
        byte_range: Range<usize>,
        is_from_device: bool,
        seg: &USegment,
        kva: &KVirtArea,
    ) {
        let skip = byte_range.start;
        let limit = byte_range.len();

        let (mut reader, mut writer) = if is_from_device {
            // Device wrote the KVA; copy KVA -> segment.
            // SAFETY: `kva` maps `kva.size()` bytes of valid kernel memory
            // for as long as `self` (and thus this borrow) is alive.
            let kva_reader =
                unsafe { VmReader::from_kernel_space(kva.start() as *const u8, kva.size()) };

            (kva_reader, seg.writer())
        } else {
            // CPU wrote the segment; copy segment -> KVA for the device.
            // SAFETY: as above; the KVA mapping stays valid and writable.
            let kva_writer =
                unsafe { VmWriter::from_kernel_space(kva.start() as *mut u8, kva.size()) };

            (seg.reader(), kva_writer)
        };

        // Both cursors skip to the range start and are capped at its
        // length, so exactly `byte_range` is copied.
        writer
            .skip(skip)
            .limit(limit)
            .write(reader.skip(skip).limit(limit));
    }
}
288
impl<D: DmaDirection> Split for DmaStream<D> {
    /// Splits the stream into two independent streams at `offset`.
    ///
    /// # Panics
    ///
    /// Panics if `offset` is not page-aligned or does not fall strictly
    /// inside the stream.
    fn split(self, offset: usize) -> (Self, Self) {
        assert!(offset.is_multiple_of(PAGE_SIZE));
        assert!(0 < offset && offset < self.size());

        // Disassemble `self` without running its `Drop`, which would tear
        // down the DMA mapping that the two halves are about to inherit.
        let (inner, map_daddr, is_cache_coherent) = {
            let this = ManuallyDrop::new(self);
            (
                // SAFETY: `this` is wrapped in `ManuallyDrop` and never
                // used again, so bitwise-moving `inner` out cannot lead to
                // a double drop or double use.
                unsafe { core::ptr::read(&this.inner as *const Inner) },
                this.map_daddr,
                this.is_cache_coherent,
            )
        };

        // Split the backing memory; each variant splits all of its parts
        // at the same offset so the halves stay internally consistent.
        let (inner1, inner2) = match inner {
            Inner::Segment(segment) => {
                let (s1, s2) = segment.split(offset);
                (Inner::Segment(s1), Inner::Segment(s2))
            }
            Inner::Kva(kva, paddr) => {
                let (kva1, kva2) = kva.split(offset);
                let (paddr1, paddr2) = (paddr, paddr + offset);
                (Inner::Kva(kva1, paddr1), Inner::Kva(kva2, paddr2))
            }
            Inner::Both(kva, paddr, segment) => {
                let (kva1, kva2) = kva.split(offset);
                let (paddr1, paddr2) = (paddr, paddr + offset);
                let (s1, s2) = segment.split(offset);
                (Inner::Both(kva1, paddr1, s1), Inner::Both(kva2, paddr2, s2))
            }
        };

        // Split the device-address mapping accordingly (a no-op split when
        // there is no remapped device address).
        let (daddr1, daddr2) = split_daddr(map_daddr, offset);

        (
            Self {
                inner: inner1,
                map_daddr: daddr1,
                is_cache_coherent,
                _phantom: PhantomData,
            },
            Self {
                inner: inner2,
                map_daddr: daddr2,
                is_cache_coherent,
                _phantom: PhantomData,
            },
        )
    }
}
340
impl<D: DmaDirection> Drop for DmaStream<D> {
    fn drop(&mut self) {
        // SAFETY: this physical range was mapped by `prepare_dma` with
        // this `map_daddr` when the stream was created (or split off), and
        // the mapping is torn down exactly once here — `split` avoids
        // running `Drop` on the original via `ManuallyDrop`.
        unsafe { unprepare_dma(&self.paddr_range(), self.map_daddr) };
    }
}
347
348impl<D: DmaDirection> HasPaddr for DmaStream<D> {
349 fn paddr(&self) -> Paddr {
350 match &self.inner {
351 Inner::Segment(segment) => segment.paddr(),
352 Inner::Kva(_, paddr) | Inner::Both(_, paddr, _) => *paddr, }
354 }
355}
356
357impl<D: DmaDirection> HasDaddr for DmaStream<D> {
358 fn daddr(&self) -> Daddr {
359 self.map_daddr.unwrap_or_else(|| self.paddr() as Daddr)
360 }
361}
362
363impl<D: DmaDirection> HasSize for DmaStream<D> {
364 fn size(&self) -> usize {
365 match &self.inner {
366 Inner::Segment(segment) => segment.size(),
367 Inner::Kva(kva, _) => kva.size(),
368 Inner::Both(kva, _, segment) => {
369 debug_assert_eq!(kva.size(), segment.size());
370 kva.size()
371 }
372 }
373 }
374}
375
376impl<D: DmaDirection> HasVmReaderWriter for DmaStream<D> {
377 type Types = VmReaderWriterResult;
378
379 fn reader(&self) -> Result<VmReader<'_, Infallible>, Error> {
380 if !D::CAN_READ_FROM_DEVICE {
381 return Err(Error::AccessDenied);
382 }
383 match &self.inner {
384 Inner::Segment(seg) | Inner::Both(_, _, seg) => Ok(seg.reader()),
385 Inner::Kva(kva, _) => {
386 unsafe {
392 Ok(VmReader::from_kernel_space(
393 kva.start() as *const u8,
394 kva.size(),
395 ))
396 }
397 }
398 }
399 }
400
401 fn writer(&self) -> Result<VmWriter<'_, Infallible>, Error> {
402 if !D::CAN_WRITE_TO_DEVICE {
403 return Err(Error::AccessDenied);
404 }
405 match &self.inner {
406 Inner::Segment(seg) | Inner::Both(_, _, seg) => Ok(seg.writer()),
407 Inner::Kva(kva, _) => {
408 unsafe {
414 Ok(VmWriter::from_kernel_space(
415 kva.start() as *mut u8,
416 kva.size(),
417 ))
418 }
419 }
420 }
421 }
422}