use alloc::vec::Vec;
use core::{
    mem::MaybeUninit,
    ops::Range,
    sync::atomic::{AtomicBool, Ordering},
};

use super::{
    PAGE_SIZE, Vaddr,
    frame::{Frame, meta::AnyFrameMeta},
};
use crate::{
    arch::irq,
    const_assert,
    cpu::{AtomicCpuSet, CpuSet, PinCurrentCpu},
    cpu_local,
    sync::{LocalIrqDisabled, SpinLock},
};

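/// A batching TLB flusher bound to a set of target CPUs.
///
/// Flush operations are first collected locally with the `issue_*` methods,
/// sent out with [`TlbFlusher::dispatch_tlb_flush`], and waited for with
/// [`TlbFlusher::sync_tlb_flush`].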
pub struct TlbFlusher<'a, G: PinCurrentCpu> {
    target_cpus: &'a AtomicCpuSet,
    have_unsynced_flush: CpuSet,
    ops_stack: OpsStack,
    _pin_current: G,
}

impl<'a, G: PinCurrentCpu> TlbFlusher<'a, G> {
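    /// Creates a new TLB flusher for the given target CPUs.
    ///
    /// The guard pins the current task to the current CPU for as long as
    /// the flusher is alive.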
    pub fn new(target_cpus: &'a AtomicCpuSet, pin_current_guard: G) -> Self {
        Self {
            target_cpus,
            have_unsynced_flush: CpuSet::new_empty(),
            ops_stack: OpsStack::new(),
            _pin_current: pin_current_guard,
        }
    }

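    /// Issues a pending TLB flush request.
    ///
    /// The request is only recorded here; nothing is flushed until
    /// [`Self::dispatch_tlb_flush`] is called.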
    pub fn issue_tlb_flush(&mut self, op: TlbFlushOp) {
        self.ops_stack.push(op, None);
    }

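    /// Issues a pending TLB flush request, keeping the given frame alive
    /// until the request has been carried out.
    ///
    /// The frame is only dropped after the corresponding TLB flush has been
    /// performed.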
    pub fn issue_tlb_flush_with(
        &mut self,
        op: TlbFlushOp,
        drop_after_flush: Frame<dyn AnyFrameMeta>,
    ) {
        self.ops_stack.push(op, Some(drop_after_flush));
    }

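    /// Dispatches all pending TLB flush requests.
    ///
    /// Requests targeting the current CPU are performed immediately, while
    /// requests for other CPUs are pushed to their per-CPU queues and
    /// signalled via an inter-processor call. This method does not wait for
    /// the remote CPUs; use [`Self::sync_tlb_flush`] for that.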
    pub fn dispatch_tlb_flush(&mut self) {
        let irq_guard = crate::irq::disable_local();

        if self.ops_stack.is_empty() {
            return;
        }

        // Load the set of CPUs that should receive the flush.
        let mut target_cpus = self.target_cpus.load(Ordering::Acquire);

        let cur_cpu = irq_guard.current_cpu();
        let mut need_flush_on_self = false;

        if target_cpus.contains(cur_cpu) {
            target_cpus.remove(cur_cpu);
            need_flush_on_self = true;
        }

        for cpu in target_cpus.iter() {
            {
                let mut flush_ops = FLUSH_OPS.get_on_cpu(cpu).lock();
                flush_ops.push_from(&self.ops_stack);

                // Clear the acknowledgement flag for this CPU; it is set
                // back to `true` once the remote CPU has taken over the
                // operations we just pushed.
                ACK_REMOTE_FLUSH
                    .get_on_cpu(cpu)
                    .store(false, Ordering::Relaxed);
            }
            self.have_unsynced_flush.add(cpu);
        }

        crate::smp::inter_processor_call(&target_cpus, do_remote_flush);

        // Handle the local copy of the operations; remote CPUs flush their
        // own copies.
        if need_flush_on_self {
            self.ops_stack.flush_all();
        } else {
            self.ops_stack.clear_without_flush();
        }
    }

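    /// Waits until all dispatched remote TLB flushes have been acknowledged.
    ///
    /// # Panics
    ///
    /// Panics if called with local IRQs disabled, since spinning in that
    /// state would prevent this CPU from serving flush requests issued by
    /// other CPUs, which could deadlock.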
    pub fn sync_tlb_flush(&mut self) {
        assert!(
            irq::is_local_enabled(),
            "Waiting for remote flush with IRQs disabled"
        );

        for cpu in self.have_unsynced_flush.iter() {
            while !ACK_REMOTE_FLUSH.get_on_cpu(cpu).load(Ordering::Relaxed) {
                core::hint::spin_loop();
            }
        }

        self.have_unsynced_flush = CpuSet::new_empty();
    }
}

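/// A TLB flush operation.
///
/// The operation is packed into a single `Vaddr`: `Vaddr::MAX` means
/// flushing all non-global entries, while any other value stores the
/// page-aligned start address in the high bits and the number of pages to
/// flush in the low bits.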
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct TlbFlushOp(Vaddr);

// The page count is stored in the page-offset bits, so its mask must fit
// within them.
const_assert!(TlbFlushOp::FLUSH_RANGE_NPAGES_MASK | (PAGE_SIZE - 1) == PAGE_SIZE - 1);

impl TlbFlushOp {
    const FLUSH_ALL_VAL: Vaddr = Vaddr::MAX;
    const FLUSH_RANGE_NPAGES_MASK: Vaddr =
        (1 << (usize::BITS - FLUSH_ALL_PAGES_THRESHOLD.leading_zeros())) - 1;

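    /// Performs the flush operation on the current CPU.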
    pub fn perform_on_current(&self) {
        use crate::arch::mm::{
            tlb_flush_addr, tlb_flush_addr_range, tlb_flush_all_excluding_global,
        };
        match self.0 {
            Self::FLUSH_ALL_VAL => tlb_flush_all_excluding_global(),
            addr => {
                let start = addr & !Self::FLUSH_RANGE_NPAGES_MASK;
                let num_pages = addr & Self::FLUSH_RANGE_NPAGES_MASK;

                debug_assert!((addr & (PAGE_SIZE - 1)) < FLUSH_ALL_PAGES_THRESHOLD);
                debug_assert!(num_pages != 0);

                if num_pages == 1 {
                    tlb_flush_addr(start);
                } else {
                    tlb_flush_addr_range(&(start..start + num_pages * PAGE_SIZE));
                }
            }
        }
    }

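    /// Creates an operation that flushes all non-global TLB entries.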
    pub const fn for_all() -> Self {
        TlbFlushOp(Self::FLUSH_ALL_VAL)
    }

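    /// Creates an operation that flushes the single page at `addr`.
    ///
    /// The address is expected to be page-aligned.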
    pub const fn for_single(addr: Vaddr) -> Self {
        TlbFlushOp(addr | 1)
    }

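    /// Creates an operation that flushes the given range of pages.
    ///
    /// Ranges covering `FLUSH_ALL_PAGES_THRESHOLD` pages or more degenerate
    /// into a full flush.
    ///
    /// # Panics
    ///
    /// Panics if the range is empty or not page-aligned.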
    pub const fn for_range(range: Range<Vaddr>) -> Self {
        assert!(
            range.start.is_multiple_of(PAGE_SIZE),
            "Range start must be page-aligned"
        );
        assert!(
            range.end.is_multiple_of(PAGE_SIZE),
            "Range end must be page-aligned"
        );
        assert!(range.start < range.end, "Range must not be empty");
        let num_pages = (range.end - range.start) / PAGE_SIZE;
        if num_pages >= FLUSH_ALL_PAGES_THRESHOLD {
            return TlbFlushOp::for_all();
        }
        TlbFlushOp(range.start | (num_pages as Vaddr))
    }

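    /// Returns the number of pages flushed by this operation, or `u32::MAX`
    /// for a full flush.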
    fn num_pages(&self) -> u32 {
        if self.0 == Self::FLUSH_ALL_VAL {
            u32::MAX
        } else {
            debug_assert!((self.0 & (PAGE_SIZE - 1)) < FLUSH_ALL_PAGES_THRESHOLD);
            let num_pages = (self.0 & Self::FLUSH_RANGE_NPAGES_MASK) as u32;
            debug_assert!(num_pages != 0);
            num_pages
        }
    }
}

cpu_local! {
    /// The flush operations pushed to this CPU by other CPUs.
    static FLUSH_OPS: SpinLock<OpsStack, LocalIrqDisabled> = SpinLock::new(OpsStack::new());
    /// Whether this CPU has taken over all operations pushed to it.
    static ACK_REMOTE_FLUSH: AtomicBool = AtomicBool::new(true);
}

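/// Handles a remote TLB flush request on the current CPU.
///
/// This is the handler passed to the inter-processor call in
/// [`TlbFlusher::dispatch_tlb_flush`].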
fn do_remote_flush() {
    // The handler runs on the target CPU of the inter-processor call, so a
    // racy read of the CPU ID suffices here.
    let current_cpu = crate::cpu::CpuId::current_racy();

    let mut new_op_queue = OpsStack::new();
    {
        let mut op_queue = FLUSH_OPS.get_on_cpu(current_cpu).lock();

        // Take over the operations pushed to this CPU.
        core::mem::swap(&mut *op_queue, &mut new_op_queue);

        // Acknowledge them while still holding the lock.
        ACK_REMOTE_FLUSH
            .get_on_cpu(current_cpu)
            .store(true, Ordering::Relaxed);
    }
    // Perform the flushes (and drop any kept frames) outside the lock.
    new_op_queue.flush_all();
}

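/// The number of pages at or above which a full TLB flush is performed
/// instead of flushing pages one by one.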
const FLUSH_ALL_PAGES_THRESHOLD: usize = 32;

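/// A fixed-capacity collection of TLB flush operations, together with the
/// frames that must be kept alive until the operations are performed.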
struct OpsStack {
    /// The pending operations; only the first `num_ops` entries are
    /// initialized.
    ops: [MaybeUninit<TlbFlushOp>; FLUSH_ALL_PAGES_THRESHOLD],
    num_ops: u32,
    /// The total number of pages covered by the pending operations, or
    /// `u32::MAX` if a full TLB flush is needed instead.
    num_pages_to_flush: u32,
    /// Frames that must be dropped only after the flushes are performed.
    page_keeper: Vec<Frame<dyn AnyFrameMeta>>,
}

impl OpsStack {
    const fn new() -> Self {
        Self {
            ops: [const { MaybeUninit::uninit() }; FLUSH_ALL_PAGES_THRESHOLD],
            num_ops: 0,
            num_pages_to_flush: 0,
            page_keeper: Vec::new(),
        }
    }

    fn is_empty(&self) -> bool {
        self.num_ops == 0 && self.num_pages_to_flush == 0
    }

    fn need_flush_all(&self) -> bool {
        self.num_pages_to_flush == u32::MAX
    }

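    /// Pushes an operation, optionally with a frame to keep alive until the
    /// operation is performed.
    ///
    /// If the accumulated operations cover too many pages, they collapse
    /// into a single full flush.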
    fn push(&mut self, op: TlbFlushOp, drop_after_flush: Option<Frame<dyn AnyFrameMeta>>) {
        if let Some(frame) = drop_after_flush {
            self.page_keeper.push(frame);
        }

        if self.need_flush_all() {
            return;
        }
        let op_num_pages = op.num_pages();
        if op == TlbFlushOp::for_all()
            || self.num_pages_to_flush + op_num_pages >= FLUSH_ALL_PAGES_THRESHOLD as u32
        {
            self.num_pages_to_flush = u32::MAX;
            self.num_ops = 0;
            return;
        }

        self.ops[self.num_ops as usize].write(op);
        self.num_ops += 1;
        self.num_pages_to_flush += op_num_pages;
    }

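    /// Merges all operations and kept frames from another stack into this
    /// one, collapsing into a full flush if the combined operations cover
    /// too many pages.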
    fn push_from(&mut self, other: &OpsStack) {
        self.page_keeper.extend(other.page_keeper.iter().cloned());

        if self.need_flush_all() {
            return;
        }
        if other.need_flush_all()
            || self.num_pages_to_flush + other.num_pages_to_flush
                >= FLUSH_ALL_PAGES_THRESHOLD as u32
        {
            self.num_pages_to_flush = u32::MAX;
            self.num_ops = 0;
            return;
        }

        for other_op in other.ops_iter() {
            self.ops[self.num_ops as usize].write(other_op.clone());
            self.num_ops += 1;
        }
        self.num_pages_to_flush += other.num_pages_to_flush;
    }

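    /// Performs all pending operations on the current CPU, then clears the
    /// stack and drops the kept frames.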
    fn flush_all(&mut self) {
        if self.need_flush_all() {
            crate::arch::mm::tlb_flush_all_excluding_global();
        } else {
            self.ops_iter().for_each(|op| {
                op.perform_on_current();
            });
        }

        self.clear_without_flush();
    }

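    /// Clears the stack and drops the kept frames without performing any
    /// flush.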
    fn clear_without_flush(&mut self) {
        self.num_pages_to_flush = 0;
        self.num_ops = 0;
        self.page_keeper.clear();
    }

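    /// Iterates over the recorded operations.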
    fn ops_iter(&self) -> impl Iterator<Item = &TlbFlushOp> {
        self.ops.iter().take(self.num_ops as usize).map(|op| {
            // SAFETY: The first `num_ops` entries are always initialized.
            unsafe { op.assume_init_ref() }
        })
    }
}