// ostd/mm/dma/dma_coherent.rs

use core::ops::Deref;

use cfg_if::cfg_if;

use super::{DmaError, check_and_insert_dma_mapping, remove_dma_mapping};
use crate::{
    arch::iommu,
    mm::{
        HasDaddr, HasPaddr, HasSize, Infallible, PAGE_SIZE, Paddr, USegment, VmReader, VmWriter,
        dma::{Daddr, DmaType, dma_type},
        io_util::{HasVmReaderWriter, VmReaderWriterIdentity},
        kspace::{KERNEL_PAGE_TABLE, paddr_to_vaddr},
        page_prop::CachePolicy,
    },
};

cfg_if! {
    if #[cfg(all(target_arch = "x86_64", feature = "cvm_guest"))] {
        use crate::arch::tdx_guest;
    }
}
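
/// A coherent DMA mapping: memory that both the CPU and a DMA-capable device
/// can access concurrently, without explicit cache flushes in between.
///
/// The mapping is torn down automatically when the value is dropped.
///
/// A minimal usage sketch (illustrative only; `alloc_segment` and
/// `device.set_ring_base` are hypothetical stand-ins, not APIs defined here):
///
/// ```ignore
/// let segment: USegment = alloc_segment(1)?; // one PAGE_SIZE-sized frame
/// let dma = DmaCoherent::map(segment, /* is_cache_coherent: */ false)?;
/// device.set_ring_base(dma.daddr()); // hand the device address to hardware
/// // CPU-side access goes through `dma.reader()` / `dma.writer()`.
/// ```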
#[derive(Debug)]
pub struct DmaCoherent {
    segment: USegment,
    start_daddr: Daddr,
    is_cache_coherent: bool,
}

impl DmaCoherent {
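    /// Establishes a coherent DMA mapping over `segment`.
    ///
    /// Fails with `DmaError::AlreadyMapped` if any frame of the segment is
    /// already part of another DMA mapping. If `is_cache_coherent` is
    /// `false`, the kernel's linear mapping of the segment is switched to an
    /// uncacheable policy so that CPU accesses bypass stale cache lines.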
    pub fn map(segment: USegment, is_cache_coherent: bool) -> core::result::Result<Self, DmaError> {
        let paddr = segment.paddr();
        let frame_count = segment.size() / PAGE_SIZE;

        if !check_and_insert_dma_mapping(paddr, frame_count) {
            return Err(DmaError::AlreadyMapped);
        }

        if !is_cache_coherent {
            let page_table = KERNEL_PAGE_TABLE.get().unwrap();
            let vaddr = paddr_to_vaddr(paddr);
            let va_range = vaddr..vaddr + (frame_count * PAGE_SIZE);
            // SAFETY: the range covers only the kernel's linear mapping of
            // `segment`, whose frames stay alive for this mapping's lifetime.
            unsafe {
                page_table
                    .protect_flush_tlb(&va_range, |p| p.cache = CachePolicy::Uncacheable)
                    .unwrap();
            }
        }

        // Compute the device-visible start address for the platform's DMA
        // scheme: direct (physical) addressing or IOMMU-translated.
        let start_daddr = match dma_type() {
            DmaType::Direct => {
                #[cfg(target_arch = "x86_64")]
                crate::arch::if_tdx_enabled!({
                    // SAFETY: the frames are dedicated to DMA, and they are
                    // shared with the host only while this mapping is alive.
                    unsafe {
                        tdx_guest::unprotect_gpa_range(paddr, frame_count).unwrap();
                    }
                });
                paddr as Daddr
            }
            DmaType::Iommu => {
                for i in 0..frame_count {
                    let paddr = paddr + (i * PAGE_SIZE);
                    // SAFETY: this identity-maps one frame of `segment` in
                    // the IOMMU; the frame outlives the IOMMU mapping.
                    unsafe {
                        iommu::map(paddr as Daddr, paddr).unwrap();
                    }
                }
                paddr as Daddr
            }
        };

        Ok(Self {
            segment,
            start_daddr,
            is_cache_coherent,
        })
    }
}

impl Deref for DmaCoherent {
    type Target = USegment;
    fn deref(&self) -> &Self::Target {
        &self.segment
    }
}

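// Dropping a `DmaCoherent` reverses `map`: re-protect the pages under TDX or
// unmap them from the IOMMU, restore the write-back cache policy, and remove
// the range from the global DMA-mapping bookkeeping.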
impl Drop for DmaCoherent {
    fn drop(&mut self) {
        let paddr = self.segment.paddr();
        let frame_count = self.segment.size() / PAGE_SIZE;

        match dma_type() {
            DmaType::Direct => {
                #[cfg(target_arch = "x86_64")]
                crate::arch::if_tdx_enabled!({
                    // SAFETY: the pages are no longer used for DMA, so they
                    // can be converted back to TDX-protected memory.
                    unsafe {
                        tdx_guest::protect_gpa_range(paddr, frame_count).unwrap();
                    }
                });
            }
            DmaType::Iommu => {
                for i in 0..frame_count {
                    let paddr = paddr + (i * PAGE_SIZE);
                    iommu::unmap(paddr as Daddr).unwrap();
                }
            }
        }

        if !self.is_cache_coherent {
            let page_table = KERNEL_PAGE_TABLE.get().unwrap();
            let vaddr = paddr_to_vaddr(paddr);
            let va_range = vaddr..vaddr + (frame_count * PAGE_SIZE);
            // SAFETY: the range covers only the linear mapping of
            // `self.segment`; restoring the default write-back cache policy
            // cannot affect other mappings.
            unsafe {
                page_table
                    .protect_flush_tlb(&va_range, |p| p.cache = CachePolicy::Writeback)
                    .unwrap();
            }
        }

        remove_dma_mapping(paddr, frame_count);
    }
}

impl HasPaddr for DmaCoherent {
    fn paddr(&self) -> Paddr {
        self.segment.paddr()
    }
}

impl HasSize for DmaCoherent {
    fn size(&self) -> usize {
        self.segment.size()
    }
}

impl HasDaddr for DmaCoherent {
    fn daddr(&self) -> Daddr {
        self.start_daddr
    }
}

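// CPU-side reads and writes go through `VmReader`/`VmWriter` borrowed from
// the underlying segment.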
impl HasVmReaderWriter for DmaCoherent {
    type Types = VmReaderWriterIdentity;

    fn reader(&self) -> VmReader<'_, Infallible> {
        self.segment.reader()
    }

    fn writer(&self) -> VmWriter<'_, Infallible> {
        self.segment.writer()
    }
}