ostd/mm/tlb.rs

// SPDX-License-Identifier: MPL-2.0
//! TLB flush operations.
use alloc::vec::Vec;
use core::{
    ops::Range,
    sync::atomic::{AtomicBool, Ordering},
};

use super::{
    frame::{meta::AnyFrameMeta, Frame},
    Vaddr, PAGE_SIZE,
};

use crate::specs::mm::cpu::{AtomicCpuSet, CpuSet, PinCurrentCpu};
/*use crate::{
    arch::irq,
    cpu::{AtomicCpuSet, CpuSet, PinCurrentCpu},
    cpu_local,
    sync::{LocalIrqDisabled, SpinLock},
};*/

/// A TLB flusher that is aware of which CPUs need to be flushed.
///
/// The flusher needs to stick to the current CPU.
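///
/// # Examples
///
/// An illustrative sketch of the intended usage. The `activated_cpus`,
/// `preempt_guard`, and `vaddr` values are placeholders, and `()` is assumed
/// to be a valid frame metadata type; none of them are defined by this module.
///
/// ```ignore
/// let mut flusher = TlbFlusher::new(&activated_cpus, preempt_guard);
/// // Queue a flush request for a single page.
/// flusher.issue_tlb_flush::<()>(TlbFlushOp::Address(vaddr));
/// // Start processing the queued requests (possibly asynchronously).
/// flusher.dispatch_tlb_flush();
/// // Wait until all target CPUs have performed the flush.
/// flusher.sync_tlb_flush();
/// ```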
pub struct TlbFlusher<'a, G: PinCurrentCpu> {
    target_cpus: &'a AtomicCpuSet,
    have_unsynced_flush: CpuSet,
    ops_stack: OpsStack,
    _pin_current: G,
}

impl<'a, G: PinCurrentCpu> TlbFlusher<'a, G> {
    /// Creates a new TLB flusher with the specified CPUs to be flushed.
    ///
    /// The target CPUs should be a reference to an [`AtomicCpuSet`] that will
    /// be loaded upon [`Self::dispatch_tlb_flush`].
    ///
    /// The flusher needs to stick to the current CPU, so a guard that
    /// implements [`PinCurrentCpu`] must be provided.
    pub fn new(target_cpus: &'a AtomicCpuSet, pin_current_guard: G) -> Self {
        Self {
            target_cpus,
            have_unsynced_flush: CpuSet::new_empty(),
            ops_stack: OpsStack::new(),
            _pin_current: pin_current_guard,
        }
    }

    /// Issues a pending TLB flush request.
    ///
    /// This function does not guarantee that the TLB entries are flushed on
    /// either this CPU or remote CPUs. The flush requests are only performed
    /// when [`Self::dispatch_tlb_flush`] is called.
    pub fn issue_tlb_flush<M: AnyFrameMeta>(&mut self, op: TlbFlushOp) {
        self.ops_stack.push(op, None::<Frame<M>>);
    }

    /// Issues a TLB flush request that must happen before dropping the page.
    ///
    /// If we need to remove a mapped page from the page table, we can only
    /// recycle the page after all the relevant TLB entries on all CPUs are
    /// flushed. Otherwise, if the page is recycled for other purposes, the
    /// user space program can still access the page through stale TLB
    /// entries. This method is designed to be used in such cases.
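    ///
    /// # Examples
    ///
    /// A sketch of the intended pattern; `unmap_one` is a hypothetical helper
    /// that removes a mapping and returns the previously mapped [`Frame`]:
    ///
    /// ```ignore
    /// // Keep the frame alive until the flush request has been processed.
    /// let frame = unmap_one(vaddr);
    /// flusher.issue_tlb_flush_with(TlbFlushOp::Address(vaddr), frame);
    /// flusher.dispatch_tlb_flush();
    /// ```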
    pub fn issue_tlb_flush_with<M: AnyFrameMeta>(
        &mut self,
        op: TlbFlushOp,
        drop_after_flush: Frame<M>,
    ) {
        self.ops_stack.push(op, Some(drop_after_flush));
    }

    /// Dispatches all the pending TLB flush requests.
    ///
    /// All pending requests previously issued by [`Self::issue_tlb_flush`] or
    /// [`Self::issue_tlb_flush_with`] start to be processed after this
    /// function is called, but not necessarily synchronously: when this
    /// function returns, the TLB entries may not yet be coherent.
    pub fn dispatch_tlb_flush(&mut self) {
        unimplemented!()
        /*let irq_guard = crate::trap::irq::disable_local();

        if self.ops_stack.is_empty() {
            return;
        }

        // `Release` to make sure our modification on the PT is visible to CPUs
        // that are going to activate the PT.
        let mut target_cpus = self.target_cpus.load(Ordering::Release);

        let cur_cpu = irq_guard.current_cpu();
        let mut need_flush_on_self = false;

        if target_cpus.contains(cur_cpu) {
            target_cpus.remove(cur_cpu);
            need_flush_on_self = true;
        }

        for cpu in target_cpus.iter() {
            {
                let mut flush_ops = FLUSH_OPS.get_on_cpu(cpu).lock();
                flush_ops.push_from(&self.ops_stack);

                // Clear ACK before dropping the lock to avoid false ACKs.
                ACK_REMOTE_FLUSH
                    .get_on_cpu(cpu)
                    .store(false, Ordering::Relaxed);
            }
            self.have_unsynced_flush.add(cpu);
        }

        crate::smp::inter_processor_call(&target_cpus, do_remote_flush);

        // Flush ourselves after sending all IPIs to save some time.
        if need_flush_on_self {
            self.ops_stack.flush_all();
        } else {
            self.ops_stack.clear_without_flush();
        }*/
    }

    /// Waits for all the previous TLB flush requests to be completed.
    ///
    /// After this function returns, all TLB entries corresponding to
    /// previously dispatched TLB flush requests are guaranteed to be coherent.
    ///
    /// The TLB flush requests are issued with [`Self::issue_tlb_flush`] and
    /// dispatched with [`Self::dispatch_tlb_flush`]. This method does not
    /// dispatch any issued requests, so it does not guarantee TLB coherence
    /// for requests that have not been dispatched.
    ///
    /// # Panics
    ///
    /// This method panics if IRQs are disabled. Since remote flushes are
    /// processed in IRQ handlers, two CPUs may deadlock if they wait for each
    /// other's TLB coherence with IRQs disabled.
    pub fn sync_tlb_flush(&mut self) {
        unimplemented!()
        /*
        assert!(
            irq::is_local_enabled(),
            "Waiting for remote flush with IRQs disabled"
        );

        for cpu in self.have_unsynced_flush.iter() {
            while !ACK_REMOTE_FLUSH.get_on_cpu(cpu).load(Ordering::Relaxed) {
                core::hint::spin_loop();
            }
        }

        self.have_unsynced_flush = CpuSet::new_empty();
        */
    }
}

/// The operation to flush TLB entries.
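///
/// # Examples
///
/// Constructing the three kinds of flush requests (the addresses below are
/// illustrative placeholders only):
///
/// ```ignore
/// let one_page = TlbFlushOp::Address(0xffff_8000_0000_0000);
/// let a_range = TlbFlushOp::Range(0xffff_8000_0000_0000..0xffff_8000_0000_2000);
/// let everything = TlbFlushOp::All;
/// ```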
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum TlbFlushOp {
    /// Flush all TLB entries except for the global entries.
    All,
    /// Flush the TLB entry for the specified virtual address.
    Address(Vaddr),
    /// Flush the TLB entries for the specified virtual address range.
    Range(Range<Vaddr>),
}

impl TlbFlushOp {
    /// Performs the TLB flush operation on the current CPU.
    pub fn perform_on_current(&self) {
        unimplemented!()
        /*use crate::arch::mm::{
            tlb_flush_addr, tlb_flush_addr_range, tlb_flush_all_excluding_global,
        };
        match self {
            TlbFlushOp::All => tlb_flush_all_excluding_global(),
            TlbFlushOp::Address(addr) => tlb_flush_addr(*addr),
            TlbFlushOp::Range(range) => tlb_flush_addr_range(range),
        }*/
    }

    /// Converts a large-range flush into a flush-all if the range exceeds
    /// `FLUSH_ALL_RANGE_THRESHOLD`.
    fn optimize_for_large_range(self) -> Self {
        match self {
            TlbFlushOp::Range(range) => {
                if range.len() > FLUSH_ALL_RANGE_THRESHOLD {
                    TlbFlushOp::All
                } else {
                    TlbFlushOp::Range(range)
                }
            }
            _ => self,
        }
    }
}

// The queues of pending requests on each CPU.
/*cpu_local! {
    static FLUSH_OPS: SpinLock<OpsStack, LocalIrqDisabled> = SpinLock::new(OpsStack::new());
    /// Whether this CPU finishes the last remote flush request.
    static ACK_REMOTE_FLUSH: AtomicBool = AtomicBool::new(true);
}*/

/// Processes the pending TLB flush requests queued for the current CPU.
///
/// This is the handler invoked on remote CPUs via the inter-processor call
/// sent by [`TlbFlusher::dispatch_tlb_flush`].
fn do_remote_flush() {
    unimplemented!()
    /*
    // No races because we are in IRQs or have disabled preemption.
    let current_cpu = crate::cpu::CpuId::current_racy();

    let mut new_op_queue = OpsStack::new();
    {
        let mut op_queue = FLUSH_OPS.get_on_cpu(current_cpu).lock();

        core::mem::swap(&mut *op_queue, &mut new_op_queue);

        // ACK before dropping the lock so that we won't miss flush requests.
        ACK_REMOTE_FLUSH
            .get_on_cpu(current_cpu)
            .store(true, Ordering::Relaxed);
    }
    // Unlock the locks quickly to avoid contention. ACK before flushing is
    // fine since we cannot switch back to userspace now.
    new_op_queue.flush_all();
    */
}

/// If a TLB flush request covers a range larger than this threshold, we
/// flush all the TLB entries instead of the range.
const FLUSH_ALL_RANGE_THRESHOLD: usize = 32 * PAGE_SIZE();

/// If the number of pending requests exceeds this threshold, we flush all the
/// TLB entries instead of flushing them one by one.
const FLUSH_ALL_OPS_THRESHOLD: usize = 32;

/// A fixed-capacity buffer of pending TLB flush operations that collapses
/// into a single flush-all when it overflows.
struct OpsStack {
    ops: [Option<TlbFlushOp>; FLUSH_ALL_OPS_THRESHOLD],
    need_flush_all: bool,
    size: usize,
    //    page_keeper: Vec<Frame<dyn AnyFrameMeta>>,
}

impl OpsStack {
    const fn new() -> Self {
        Self {
            ops: [const { None }; FLUSH_ALL_OPS_THRESHOLD],
            need_flush_all: false,
            size: 0,
            //            page_keeper: Vec::new(),
        }
    }

    fn is_empty(&self) -> bool {
        !self.need_flush_all && self.size == 0
    }

    fn push<M: AnyFrameMeta>(&mut self, op: TlbFlushOp, drop_after_flush: Option<Frame<M>>) {
        if let Some(frame) = drop_after_flush {
            //            self.page_keeper.push(frame);
        }

        if self.need_flush_all {
            return;
        }
        let op = op.optimize_for_large_range();
        if op == TlbFlushOp::All || self.size >= FLUSH_ALL_OPS_THRESHOLD {
            self.need_flush_all = true;
            self.size = 0;
            return;
        }

        self.ops[self.size] = Some(op);
        self.size += 1;
    }

    fn push_from(&mut self, other: &OpsStack) {
        //        self.page_keeper.extend(other.page_keeper.iter().cloned());

        if self.need_flush_all {
            return;
        }
        if other.need_flush_all || self.size + other.size >= FLUSH_ALL_OPS_THRESHOLD {
            self.need_flush_all = true;
            self.size = 0;
            return;
        }

        for i in 0..other.size {
            self.ops[self.size] = other.ops[i].clone();
            self.size += 1;
        }
    }

    fn flush_all(&mut self) {
        if self.need_flush_all {
            unimplemented!()
            /*crate::arch::mm::tlb_flush_all_excluding_global();*/
        } else {
            for i in 0..self.size {
                if let Some(op) = &self.ops[i] {
                    op.perform_on_current();
                }
            }
        }

        self.clear_without_flush();
    }

    fn clear_without_flush(&mut self) {
        self.need_flush_all = false;
        self.size = 0;
        //        self.page_keeper.clear();
    }
}