use alloc::vec::Vec;
use core::{
    mem::MaybeUninit,
    ops::Range,
    sync::atomic::{AtomicBool, Ordering},
};

use super::{
    PAGE_SIZE, Vaddr,
    frame::{Frame, meta::AnyFrameMeta},
};
use crate::{
    arch::irq,
    const_assert,
    cpu::{AtomicCpuSet, CpuSet, PinCurrentCpu},
    cpu_local,
    smp::IpiSender,
    sync::{LocalIrqDisabled, SpinLock},
};

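/// A TLB flusher that batches flush requests and dispatches them to a set of
/// target CPUs.
///
/// Requests are first buffered locally with [`TlbFlusher::issue_tlb_flush`]
/// (or [`TlbFlusher::issue_tlb_flush_with`]), then sent out with
/// [`TlbFlusher::dispatch_tlb_flush`], which performs the requests on the
/// current CPU (if it is a target) and notifies the other target CPUs via
/// inter-processor calls. The caller can then wait for the remote CPUs to
/// acknowledge with [`TlbFlusher::sync_tlb_flush`].
///
/// A rough usage sketch (the surrounding names are illustrative only, not
/// part of this module):
///
/// ```ignore
/// let mut flusher = TlbFlusher::new(&activated_cpus, disable_preempt());
/// flusher.issue_tlb_flush(TlbFlushOp::for_single(vaddr));
/// flusher.dispatch_tlb_flush();
/// flusher.sync_tlb_flush();
/// ```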
pub struct TlbFlusher<'a, G: PinCurrentCpu> {
    target_cpus: &'a AtomicCpuSet,
    have_unsynced_flush: CpuSet,
    ops_stack: OpsStack,
    ipi_sender: Option<&'static IpiSender>,
    _pin_current: G,
}

impl<'a, G: PinCurrentCpu> TlbFlusher<'a, G> {
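    /// Creates a new TLB flusher with the given set of target CPUs.
    ///
    /// The provided guard pins the current task to its CPU, which lets the
    /// flusher tell local flushes apart from remote ones.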
    pub fn new(target_cpus: &'a AtomicCpuSet, pin_current_guard: G) -> Self {
        Self {
            target_cpus,
            have_unsynced_flush: CpuSet::new_empty(),
            ops_stack: OpsStack::new(),
            ipi_sender: crate::smp::IPI_SENDER.get(),
            _pin_current: pin_current_guard,
        }
    }

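    /// Issues a pending TLB flush request.
    ///
    /// The request is only buffered; it takes effect on the target CPUs once
    /// [`Self::dispatch_tlb_flush`] is called.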
    pub fn issue_tlb_flush(&mut self, op: TlbFlushOp) {
        self.ops_stack.push(op, None);
    }

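    /// Issues a pending TLB flush request and keeps the given frame alive
    /// until the flush is performed.
    ///
    /// This works like [`Self::issue_tlb_flush`], except that the frame is
    /// held (locally and, after dispatching, by the target CPUs' queues) so
    /// that it is not dropped while stale TLB entries may still refer to it.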
    pub fn issue_tlb_flush_with(
        &mut self,
        op: TlbFlushOp,
        drop_after_flush: Frame<dyn AnyFrameMeta>,
    ) {
        self.ops_stack.push(op, Some(drop_after_flush));
    }

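    /// Dispatches all pending TLB flush requests.
    ///
    /// Requests that target the current CPU are performed immediately.
    /// Requests that target other CPUs are pushed onto their per-CPU queues,
    /// and those CPUs are notified with an inter-processor call; use
    /// [`Self::sync_tlb_flush`] to wait for them to be acknowledged.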
    pub fn dispatch_tlb_flush(&mut self) {
        let irq_guard = crate::irq::disable_local();

        if self.ops_stack.is_empty() {
            return;
        }

        let mut target_cpus = self.target_cpus.load(Ordering::Release);

        let cur_cpu = irq_guard.current_cpu();
        let mut need_flush_on_self = false;

        if target_cpus.contains(cur_cpu) {
            target_cpus.remove(cur_cpu);
            need_flush_on_self = true;
        }

        if let Some(ipi_sender) = self.ipi_sender {
            for cpu in target_cpus.iter() {
                self.have_unsynced_flush.add(cpu);

                let mut flush_ops = FLUSH_OPS.get_on_cpu(cpu).lock();
                flush_ops.push_from(&self.ops_stack);
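                // Clear the acknowledgement flag; the remote CPU sets it back
                // once it has handled the queued requests (see
                // `do_remote_flush`).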
                ACK_REMOTE_FLUSH
                    .get_on_cpu(cpu)
                    .store(false, Ordering::Relaxed);
            }

            ipi_sender.inter_processor_call(&target_cpus, do_remote_flush);
        }

        if need_flush_on_self {
            self.ops_stack.flush_all();
        } else {
            self.ops_stack.clear_without_flush();
        }
    }

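    /// Waits until all dispatched remote TLB flushes have been acknowledged.
    ///
    /// This busy-waits on the per-CPU acknowledgement flags. Local IRQs must
    /// be enabled while waiting so that the current CPU can still serve flush
    /// requests from other CPUs in the meantime.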
    pub fn sync_tlb_flush(&mut self) {
        if self.ipi_sender.is_none() {
            return;
        }

        assert!(
            irq::is_local_enabled(),
            "Waiting for remote flush with IRQs disabled"
        );

        for cpu in self.have_unsynced_flush.iter() {
            while !ACK_REMOTE_FLUSH.get_on_cpu(cpu).load(Ordering::Relaxed) {
                core::hint::spin_loop();
            }
        }

        self.have_unsynced_flush = CpuSet::new_empty();
    }
}

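/// A TLB flush operation, encoded compactly in a single word.
///
/// The value `Vaddr::MAX` denotes flushing all non-global entries. Any other
/// value packs a page-aligned start address in the upper bits and the number
/// of pages to flush in the lower bits (see `FLUSH_RANGE_NPAGES_MASK`).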
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct TlbFlushOp(Vaddr);

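// The page-count bits must fit within the in-page offset bits, so that a
// page-aligned start address and the page count can share a single word.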
const_assert!(TlbFlushOp::FLUSH_RANGE_NPAGES_MASK | (PAGE_SIZE - 1) == PAGE_SIZE - 1);

impl TlbFlushOp {
    const FLUSH_ALL_VAL: Vaddr = Vaddr::MAX;
    const FLUSH_RANGE_NPAGES_MASK: Vaddr =
        (1 << (usize::BITS - FLUSH_ALL_PAGES_THRESHOLD.leading_zeros())) - 1;

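    /// Performs the TLB flush operation on the current CPU.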
    pub fn perform_on_current(&self) {
        use crate::arch::mm::{
            tlb_flush_addr, tlb_flush_addr_range, tlb_flush_all_excluding_global,
        };
        match self.0 {
            Self::FLUSH_ALL_VAL => tlb_flush_all_excluding_global(),
            addr => {
                let start = addr & !Self::FLUSH_RANGE_NPAGES_MASK;
                let num_pages = addr & Self::FLUSH_RANGE_NPAGES_MASK;

                debug_assert!((addr & (PAGE_SIZE - 1)) < FLUSH_ALL_PAGES_THRESHOLD);
                debug_assert!(num_pages != 0);

                if num_pages == 1 {
                    tlb_flush_addr(start);
                } else {
                    tlb_flush_addr_range(&(start..start + num_pages * PAGE_SIZE));
                }
            }
        }
    }

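    /// Creates an operation that flushes all non-global TLB entries.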
    pub const fn for_all() -> Self {
        TlbFlushOp(Self::FLUSH_ALL_VAL)
    }

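    /// Creates an operation that flushes the TLB entry of a single page.
    ///
    /// The address is expected to be page-aligned, since the lower bits are
    /// used to encode the page count.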
    pub const fn for_single(addr: Vaddr) -> Self {
        TlbFlushOp(addr | 1)
    }

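    /// Creates an operation that flushes the TLB entries of a range of pages.
    ///
    /// Both ends of the range must be page-aligned and the range must not be
    /// empty. A range covering at least `FLUSH_ALL_PAGES_THRESHOLD` pages is
    /// turned into a flush-all operation.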
    pub const fn for_range(range: Range<Vaddr>) -> Self {
        assert!(
            range.start.is_multiple_of(PAGE_SIZE),
            "Range start must be page-aligned"
        );
        assert!(
            range.end.is_multiple_of(PAGE_SIZE),
            "Range end must be page-aligned"
        );
        assert!(range.start < range.end, "Range must not be empty");
        let num_pages = (range.end - range.start) / PAGE_SIZE;
        if num_pages >= FLUSH_ALL_PAGES_THRESHOLD {
            return TlbFlushOp::for_all();
        }
        TlbFlushOp(range.start | (num_pages as Vaddr))
    }

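    /// Returns the number of pages this operation flushes, or `u32::MAX` for
    /// a flush-all operation.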
    fn num_pages(&self) -> u32 {
        if self.0 == Self::FLUSH_ALL_VAL {
            u32::MAX
        } else {
            debug_assert!((self.0 & (PAGE_SIZE - 1)) < FLUSH_ALL_PAGES_THRESHOLD);
            let num_pages = (self.0 & Self::FLUSH_RANGE_NPAGES_MASK) as u32;
            debug_assert!(num_pages != 0);
            num_pages
        }
    }
}

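// The per-CPU queue of flush requests pushed by other CPUs, and the flag a
// CPU sets to acknowledge that it has handled the requests queued for it.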
cpu_local! {
    static FLUSH_OPS: SpinLock<OpsStack, LocalIrqDisabled> = SpinLock::new(OpsStack::new());
    static ACK_REMOTE_FLUSH: AtomicBool = AtomicBool::new(true);
}

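/// Handles the TLB flush requests queued for the current CPU.
///
/// This runs on the target CPU in the inter-processor call issued by
/// [`TlbFlusher::dispatch_tlb_flush`].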
fn do_remote_flush() {
    let current_cpu = crate::cpu::CpuId::current_racy();

    let mut new_op_queue = OpsStack::new();
    {
        let mut op_queue = FLUSH_OPS.get_on_cpu(current_cpu).lock();

        core::mem::swap(&mut *op_queue, &mut new_op_queue);

        ACK_REMOTE_FLUSH
            .get_on_cpu(current_cpu)
            .store(true, Ordering::Relaxed);
    }
    new_op_queue.flush_all();
}

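/// If a flush request covers at least this many pages, the whole TLB is
/// flushed instead, which is presumably cheaper than flushing page by page.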
const FLUSH_ALL_PAGES_THRESHOLD: usize = 32;

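/// A fixed-capacity buffer of pending TLB flush operations.
///
/// Once the total number of pages to flush reaches
/// `FLUSH_ALL_PAGES_THRESHOLD`, the buffered operations collapse into a
/// single flush-all request. Frames passed via
/// [`TlbFlusher::issue_tlb_flush_with`] are kept in `page_keeper` until the
/// buffered operations are flushed or discarded.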
struct OpsStack {
    ops: [MaybeUninit<TlbFlushOp>; FLUSH_ALL_PAGES_THRESHOLD],
    num_ops: u32,
    num_pages_to_flush: u32,
    page_keeper: Vec<Frame<dyn AnyFrameMeta>>,
}

impl OpsStack {
    const fn new() -> Self {
        Self {
            ops: [const { MaybeUninit::uninit() }; FLUSH_ALL_PAGES_THRESHOLD],
            num_ops: 0,
            num_pages_to_flush: 0,
            page_keeper: Vec::new(),
        }
    }

    fn is_empty(&self) -> bool {
        self.num_ops == 0 && self.num_pages_to_flush == 0
    }

    fn need_flush_all(&self) -> bool {
        self.num_pages_to_flush == u32::MAX
    }

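    /// Pushes an operation, optionally keeping a frame alive until the
    /// buffered operations are flushed or discarded.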
    fn push(&mut self, op: TlbFlushOp, drop_after_flush: Option<Frame<dyn AnyFrameMeta>>) {
        if let Some(frame) = drop_after_flush {
            self.page_keeper.push(frame);
        }

        if self.need_flush_all() {
            return;
        }
        let op_num_pages = op.num_pages();
        if op == TlbFlushOp::for_all()
            || self.num_pages_to_flush + op_num_pages >= FLUSH_ALL_PAGES_THRESHOLD as u32
        {
            self.num_pages_to_flush = u32::MAX;
            self.num_ops = 0;
            return;
        }

        self.ops[self.num_ops as usize].write(op);
        self.num_ops += 1;
        self.num_pages_to_flush += op_num_pages;
    }

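    /// Merges all operations (and kept frames) from another buffer into this
    /// one.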
    fn push_from(&mut self, other: &OpsStack) {
        self.page_keeper.extend(other.page_keeper.iter().cloned());

        if self.need_flush_all() {
            return;
        }
        if other.need_flush_all()
            || self.num_pages_to_flush + other.num_pages_to_flush
                >= FLUSH_ALL_PAGES_THRESHOLD as u32
        {
            self.num_pages_to_flush = u32::MAX;
            self.num_ops = 0;
            return;
        }

        for other_op in other.ops_iter() {
            self.ops[self.num_ops as usize].write(other_op.clone());
            self.num_ops += 1;
        }
        self.num_pages_to_flush += other.num_pages_to_flush;
    }

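    /// Performs all buffered operations on the current CPU, then clears the
    /// buffer.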
    fn flush_all(&mut self) {
        if self.need_flush_all() {
            crate::arch::mm::tlb_flush_all_excluding_global();
        } else {
            self.ops_iter().for_each(|op| {
                op.perform_on_current();
            });
        }

        self.clear_without_flush();
    }

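    /// Discards all buffered operations and kept frames without flushing.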
    fn clear_without_flush(&mut self) {
        self.num_pages_to_flush = 0;
        self.num_ops = 0;
        self.page_keeper.clear();
    }

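    /// Iterates over the buffered operations.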
    fn ops_iter(&self) -> impl Iterator<Item = &TlbFlushOp> {
        self.ops.iter().take(self.num_ops as usize).map(|op| {
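            // SAFETY: The first `num_ops` elements are initialized, since
            // each element is written before `num_ops` is incremented.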
            unsafe { op.assume_init_ref() }
        })
    }
}